
[Xen-devel] [PATCH v3 3/4] Convert map_domain_page() to use the new mfn_t type



Reworked map_domain_page()'s declaration and internals to take an
mfn_t, applying (un)boxing where needed, and converted all callers
to pass mfn_t, boxing raw frame numbers at the call sites.

Signed-off-by: Ben Catterall <Ben.Catterall@xxxxxxxxxx>
Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>

---
Changed since v1:
   * Created paddr_to_mfn() and mfn_to_paddr() for both x86 and ARM
   * Converted code to use the new paddr_to_mfn() rather than e.g.
     paddr>>PAGE_SHIFT

Changed since v2:
   * Switch to using paddr_to_pfn() and pfn_to_paddr().
   * Removed paddr_to_mfn() and mfn_to_paddr()
   * Added missing blank line
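
A note for reviewers unfamiliar with the typesafe wrappers: the
boxing/unboxing helpers used throughout this patch amount to roughly
the sketch below. This is an illustrative rendering only; the real
mfn_t comes from the TYPE_SAFE machinery in xen/include/xen/mm.h and,
if I recall correctly, reduces to a plain unsigned long in release
builds, so the sketch assumes the debug-build struct form:

    /* Illustrative sketch of the typesafe mfn wrapper; not the
     * in-tree definition. */
    typedef struct { unsigned long mfn; } mfn_t;

    /* Box a raw machine frame number into the typesafe handle. */
    static inline mfn_t _mfn(unsigned long mfn)
    {
        return (mfn_t){ mfn };
    }

    /* Unbox the typesafe handle back to a raw machine frame number. */
    static inline unsigned long mfn_x(mfn_t mfn)
    {
        return mfn.mfn;
    }

So a call site that previously passed a raw value, e.g.
map_domain_page(ma >> PAGE_SHIFT), now boxes at the call site,
map_domain_page(_mfn(paddr_to_pfn(ma))), while internals that need the
raw number unbox it with mfn_x(mfn).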
---
 xen/arch/arm/domain_build.c               |  2 +-
 xen/arch/arm/kernel.c                     |  2 +-
 xen/arch/arm/mm.c                         | 12 +++++-----
 xen/arch/arm/p2m.c                        |  4 ++--
 xen/arch/arm/traps.c                      |  4 ++--
 xen/arch/x86/debug.c                      | 10 ++++----
 xen/arch/x86/domain.c                     |  4 ++--
 xen/arch/x86/domain_build.c               | 10 ++++----
 xen/arch/x86/domain_page.c                | 22 ++++++++---------
 xen/arch/x86/domctl.c                     |  2 +-
 xen/arch/x86/mm.c                         | 40 +++++++++++++++----------------
 xen/arch/x86/mm/guest_walk.c              |  2 +-
 xen/arch/x86/mm/hap/guest_walk.c          |  2 +-
 xen/arch/x86/mm/mem_sharing.c             |  4 ++--
 xen/arch/x86/mm/p2m-ept.c                 | 22 ++++++++---------
 xen/arch/x86/mm/p2m-pod.c                 |  8 +++----
 xen/arch/x86/mm/p2m-pt.c                  | 28 +++++++++++-----------
 xen/arch/x86/mm/p2m.c                     |  2 +-
 xen/arch/x86/mm/paging.c                  | 32 ++++++++++++-------------
 xen/arch/x86/mm/shadow/common.c           |  2 +-
 xen/arch/x86/mm/shadow/multi.c            |  4 ++--
 xen/arch/x86/mm/shadow/private.h          |  2 +-
 xen/arch/x86/smpboot.c                    |  2 +-
 xen/arch/x86/tboot.c                      |  5 ++--
 xen/arch/x86/traps.c                      | 12 +++++-----
 xen/arch/x86/x86_64/mm.c                  | 14 +++++------
 xen/arch/x86/x86_64/traps.c               | 10 ++++----
 xen/arch/x86/x86_emulate.c                | 10 ++++----
 xen/common/grant_table.c                  |  4 ++--
 xen/common/kexec.c                        |  4 ++--
 xen/common/kimage.c                       | 10 ++++----
 xen/common/memory.c                       |  6 ++---
 xen/common/tmem_xen.c                     |  6 ++---
 xen/drivers/passthrough/amd/iommu_guest.c | 10 ++++----
 xen/drivers/passthrough/amd/iommu_map.c   | 14 +++++------
 xen/drivers/passthrough/vtd/x86/vtd.c     |  2 +-
 xen/include/asm-x86/hap.h                 |  2 +-
 xen/include/asm-x86/page.h                |  7 +++---
 xen/include/asm-x86/paging.h              |  2 +-
 xen/include/xen/domain_page.h             |  8 +++----
 40 files changed, 175 insertions(+), 173 deletions(-)

diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index 8556afd..a059de6 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -1408,7 +1408,7 @@ static void initrd_load(struct kernel_info *kinfo)
             return;
         }
 
-        dst = map_domain_page(ma>>PAGE_SHIFT);
+        dst = map_domain_page(_mfn(paddr_to_pfn(ma)));
 
         copy_from_paddr(dst + s, paddr + offs, l);
 
diff --git a/xen/arch/arm/kernel.c b/xen/arch/arm/kernel.c
index 209c3dd..f641b12 100644
--- a/xen/arch/arm/kernel.c
+++ b/xen/arch/arm/kernel.c
@@ -182,7 +182,7 @@ static void kernel_zimage_load(struct kernel_info *info)
             return;
         }
 
-        dst = map_domain_page(ma>>PAGE_SHIFT);
+        dst = map_domain_page(_mfn(paddr_to_pfn(ma)));
 
         copy_from_paddr(dst + s, paddr + offs, l);
 
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index d479048..ae0f34c 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -213,7 +213,7 @@ void dump_pt_walk(paddr_t ttbr, paddr_t addr,
     else
         root_table = 0;
 
-    mapping = map_domain_page(root_pfn + root_table);
+    mapping = map_domain_page(_mfn(root_pfn + root_table));
 
     for ( level = root_level; ; level++ )
     {
@@ -230,7 +230,7 @@ void dump_pt_walk(paddr_t ttbr, paddr_t addr,
 
         /* For next iteration */
         unmap_domain_page(mapping);
-        mapping = map_domain_page(pte.walk.base);
+        mapping = map_domain_page(_mfn(pte.walk.base));
     }
 
     unmap_domain_page(mapping);
@@ -282,11 +282,11 @@ void unmap_domain_page_global(const void *va)
 }
 
 /* Map a page of domheap memory */
-void *map_domain_page(unsigned long mfn)
+void *map_domain_page(mfn_t mfn)
 {
     unsigned long flags;
     lpae_t *map = this_cpu(xen_dommap);
-    unsigned long slot_mfn = mfn & ~LPAE_ENTRY_MASK;
+    unsigned long slot_mfn = mfn_x(mfn) & ~LPAE_ENTRY_MASK;
     vaddr_t va;
     lpae_t pte;
     int i, slot;
@@ -339,7 +339,7 @@ void *map_domain_page(unsigned long mfn)
 
     va = (DOMHEAP_VIRT_START
           + (slot << SECOND_SHIFT)
-          + ((mfn & LPAE_ENTRY_MASK) << THIRD_SHIFT));
+          + ((mfn_x(mfn) & LPAE_ENTRY_MASK) << THIRD_SHIFT));
 
     /*
      * We may not have flushed this specific subpage at map time,
@@ -386,7 +386,7 @@ unsigned long domain_page_map_to_mfn(const void *ptr)
 
 void flush_page_to_ram(unsigned long mfn)
 {
-    void *v = map_domain_page(mfn);
+    void *v = map_domain_page(_mfn(mfn));
 
     clean_and_invalidate_dcache_va_range(v, PAGE_SIZE);
     unmap_domain_page(v);
diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index 903fa3f..18fe91f 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -206,7 +206,7 @@ static paddr_t __p2m_lookup(struct domain *d, paddr_t paddr, p2m_type_t *t)
 
         /* Map for next level */
         unmap_domain_page(map);
-        map = map_domain_page(pte.p2m.base);
+        map = map_domain_page(_mfn(pte.p2m.base));
     }
 
     unmap_domain_page(map);
@@ -1078,7 +1078,7 @@ static int apply_p2m_changes(struct domain *d,
                 int i;
                 if ( mappings[level+1] )
                     unmap_domain_page(mappings[level+1]);
-                mappings[level+1] = map_domain_page(entry->p2m.base);
+                mappings[level+1] = map_domain_page(_mfn(entry->p2m.base));
                 cur_offset[level] = offset;
                 /* Any mapping further down is now invalid */
                 for ( i = level+1; i < 4; i++ )
diff --git a/xen/arch/arm/traps.c b/xen/arch/arm/traps.c
index 06fb40f..9d2bd6a 100644
--- a/xen/arch/arm/traps.c
+++ b/xen/arch/arm/traps.c
@@ -2293,7 +2293,7 @@ void dump_guest_s1_walk(struct domain *d, vaddr_t addr)
         printk("Failed TTBR0 maddr lookup\n");
         goto done;
     }
-    first = map_domain_page(paddr>>PAGE_SHIFT);
+    first = map_domain_page(_mfn(paddr_to_pfn(paddr)));
 
     offset = addr >> (12+10);
     printk("1ST[0x%"PRIx32"] (0x%"PRIpaddr") = 0x%08"PRIx32"\n",
@@ -2309,7 +2309,7 @@ void dump_guest_s1_walk(struct domain *d, vaddr_t addr)
         printk("Failed L1 entry maddr lookup\n");
         goto done;
     }
-    second = map_domain_page(paddr>>PAGE_SHIFT);
+    second = map_domain_page(_mfn(paddr_to_pfn(paddr)));
     offset = (addr >> 12) & 0x3FF;
     printk("2ND[0x%"PRIx32"] (0x%"PRIpaddr") = 0x%08"PRIx32"\n",
            offset, paddr, second[offset]);
diff --git a/xen/arch/x86/debug.c b/xen/arch/x86/debug.c
index 801dcf2..ee41463 100644
--- a/xen/arch/x86/debug.c
+++ b/xen/arch/x86/debug.c
@@ -108,7 +108,7 @@ dbg_pv_va2mfn(dbgva_t vaddr, struct domain *dp, uint64_t pgd3val)
 
     if ( pgd3val == 0 )
     {
-        l4t = map_domain_page(mfn);
+        l4t = map_domain_page(_mfn(mfn));
         l4e = l4t[l4_table_offset(vaddr)];
         unmap_domain_page(l4t);
         mfn = l4e_get_pfn(l4e);
@@ -120,7 +120,7 @@ dbg_pv_va2mfn(dbgva_t vaddr, struct domain *dp, uint64_t pgd3val)
             return INVALID_MFN;
         }
 
-        l3t = map_domain_page(mfn);
+        l3t = map_domain_page(_mfn(mfn));
         l3e = l3t[l3_table_offset(vaddr)];
         unmap_domain_page(l3t);
         mfn = l3e_get_pfn(l3e);
@@ -134,7 +134,7 @@ dbg_pv_va2mfn(dbgva_t vaddr, struct domain *dp, uint64_t pgd3val)
         }
     }
 
-    l2t = map_domain_page(mfn);
+    l2t = map_domain_page(_mfn(mfn));
     l2e = l2t[l2_table_offset(vaddr)];
     unmap_domain_page(l2t);
     mfn = l2e_get_pfn(l2e);
@@ -146,7 +146,7 @@ dbg_pv_va2mfn(dbgva_t vaddr, struct domain *dp, uint64_t pgd3val)
         DBGP1("l2 PAGE not present. vaddr:%lx cr3:%lx\n", vaddr, cr3);
         return INVALID_MFN;
     }
-    l1t = map_domain_page(mfn);
+    l1t = map_domain_page(_mfn(mfn));
     l1e = l1t[l1_table_offset(vaddr)];
     unmap_domain_page(l1t);
     mfn = l1e_get_pfn(l1e);
@@ -175,7 +175,7 @@ unsigned int dbg_rw_guest_mem(struct domain *dp, void * __user gaddr,
         if ( mfn == INVALID_MFN ) 
             break;
 
-        va = map_domain_page(mfn);
+        va = map_domain_page(_mfn(mfn));
         va = va + (addr & (PAGE_SIZE-1));
 
         if ( toaddr )
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 956ac70..34ecd7c 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -914,7 +914,7 @@ int arch_set_info_guest(
                 fail |= xen_pfn_to_cr3(pfn) != c.nat->ctrlreg[1];
             }
         } else {
-            l4_pgentry_t *l4tab = map_domain_page(pfn);
+            l4_pgentry_t *l4tab = map_domain_page(_mfn(pfn));
 
             pfn = l4e_get_pfn(*l4tab);
             unmap_domain_page(l4tab);
@@ -1074,7 +1074,7 @@ int arch_set_info_guest(
     {
         l4_pgentry_t *l4tab;
 
-        l4tab = map_domain_page(pagetable_get_pfn(v->arch.guest_table));
+        l4tab = map_domain_page(_mfn(pagetable_get_pfn(v->arch.guest_table)));
         *l4tab = l4e_from_pfn(page_to_mfn(cr3_page),
             _PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED);
         unmap_domain_page(l4tab);
diff --git a/xen/arch/x86/domain_build.c b/xen/arch/x86/domain_build.c
index a06379c..18cf6aa 100644
--- a/xen/arch/x86/domain_build.c
+++ b/xen/arch/x86/domain_build.c
@@ -630,7 +630,7 @@ static __init void pvh_fixup_page_tables_for_hap(struct vcpu *v,
 
     ASSERT(paging_mode_enabled(v->domain));
 
-    l4start = map_domain_page(pagetable_get_pfn(v->arch.guest_table));
+    l4start = map_domain_page(_mfn(pagetable_get_pfn(v->arch.guest_table)));
 
     /* Clear entries prior to guest L4 start */
     pl4e = l4start + l4_table_offset(v_start);
@@ -746,7 +746,7 @@ static __init void setup_pv_physmap(struct domain *d, unsigned long pgtbl_pfn,
                                     unsigned long nr_pages)
 {
     struct page_info *page = NULL;
-    l4_pgentry_t *pl4e, *l4start = map_domain_page(pgtbl_pfn);
+    l4_pgentry_t *pl4e, *l4start = map_domain_page(_mfn(pgtbl_pfn));
     l3_pgentry_t *pl3e = NULL;
     l2_pgentry_t *pl2e = NULL;
     l1_pgentry_t *pl1e = NULL;
@@ -789,7 +789,7 @@ static __init void setup_pv_physmap(struct domain *d, unsigned long pgtbl_pfn,
             clear_page(pl3e);
             *pl4e = l4e_from_page(page, L4_PROT);
         } else
-            pl3e = map_domain_page(l4e_get_pfn(*pl4e));
+            pl3e = map_domain_page(_mfn(l4e_get_pfn(*pl4e)));
 
         pl3e += l3_table_offset(vphysmap_start);
         if ( !l3e_get_intpte(*pl3e) )
@@ -816,7 +816,7 @@ static __init void setup_pv_physmap(struct domain *d, unsigned long pgtbl_pfn,
             *pl3e = l3e_from_page(page, L3_PROT);
         }
         else
-           pl2e = map_domain_page(l3e_get_pfn(*pl3e));
+            pl2e = map_domain_page(_mfn(l3e_get_pfn(*pl3e)));
 
         pl2e += l2_table_offset(vphysmap_start);
         if ( !l2e_get_intpte(*pl2e) )
@@ -844,7 +844,7 @@ static __init void setup_pv_physmap(struct domain *d, unsigned long pgtbl_pfn,
             *pl2e = l2e_from_page(page, L2_PROT);
         }
         else
-            pl1e = map_domain_page(l2e_get_pfn(*pl2e));
+            pl1e = map_domain_page(_mfn(l2e_get_pfn(*pl2e)));
 
         pl1e += l1_table_offset(vphysmap_start);
         BUG_ON(l1e_get_intpte(*pl1e));
diff --git a/xen/arch/x86/domain_page.c b/xen/arch/x86/domain_page.c
index 0f7548b..d86f8fe 100644
--- a/xen/arch/x86/domain_page.c
+++ b/xen/arch/x86/domain_page.c
@@ -66,7 +66,7 @@ void __init mapcache_override_current(struct vcpu *v)
 #define MAPCACHE_L1ENT(idx) \
     __linear_l1_table[l1_linear_offset(MAPCACHE_VIRT_START + pfn_to_paddr(idx))]
 
-void *map_domain_page(unsigned long mfn)
+void *map_domain_page(mfn_t mfn)
 {
     unsigned long flags;
     unsigned int idx, i;
@@ -76,31 +76,31 @@ void *map_domain_page(unsigned long mfn)
     struct vcpu_maphash_entry *hashent;
 
 #ifdef NDEBUG
-    if ( mfn <= PFN_DOWN(__pa(HYPERVISOR_VIRT_END - 1)) )
-        return mfn_to_virt(mfn);
+    if ( mfn_x(mfn) <= PFN_DOWN(__pa(HYPERVISOR_VIRT_END - 1)) )
+        return mfn_to_virt(mfn_x(mfn));
 #endif
 
     v = mapcache_current_vcpu();
     if ( !v || !is_pv_vcpu(v) )
-        return mfn_to_virt(mfn);
+        return mfn_to_virt(mfn_x(mfn));
 
     dcache = &v->domain->arch.pv_domain.mapcache;
     vcache = &v->arch.pv_vcpu.mapcache;
     if ( !dcache->inuse )
-        return mfn_to_virt(mfn);
+        return mfn_to_virt(mfn_x(mfn));
 
     perfc_incr(map_domain_page_count);
 
     local_irq_save(flags);
 
-    hashent = &vcache->hash[MAPHASH_HASHFN(mfn)];
-    if ( hashent->mfn == mfn )
+    hashent = &vcache->hash[MAPHASH_HASHFN(mfn_x(mfn))];
+    if ( hashent->mfn == mfn_x(mfn) )
     {
         idx = hashent->idx;
         ASSERT(idx < dcache->entries);
         hashent->refcnt++;
         ASSERT(hashent->refcnt);
-        ASSERT(l1e_get_pfn(MAPCACHE_L1ENT(idx)) == mfn);
+        ASSERT(l1e_get_pfn(MAPCACHE_L1ENT(idx)) == mfn_x(mfn));
         goto out;
     }
 
@@ -135,7 +135,7 @@ void *map_domain_page(unsigned long mfn)
         else
         {
             /* Replace a hash entry instead. */
-            i = MAPHASH_HASHFN(mfn);
+            i = MAPHASH_HASHFN(mfn_x(mfn));
             do {
                 hashent = &vcache->hash[i];
                 if ( hashent->idx != MAPHASHENT_NOTINUSE && !hashent->refcnt )
@@ -149,7 +149,7 @@ void *map_domain_page(unsigned long mfn)
                 }
                 if ( ++i == MAPHASH_ENTRIES )
                     i = 0;
-            } while ( i != MAPHASH_HASHFN(mfn) );
+            } while ( i != MAPHASH_HASHFN(mfn_x(mfn)) );
         }
         BUG_ON(idx >= dcache->entries);
 
@@ -165,7 +165,7 @@ void *map_domain_page(unsigned long mfn)
 
     spin_unlock(&dcache->lock);
 
-    l1e_write(&MAPCACHE_L1ENT(idx), l1e_from_pfn(mfn, __PAGE_HYPERVISOR_RW));
+    l1e_write(&MAPCACHE_L1ENT(idx), l1e_from_pfn(mfn_x(mfn), __PAGE_HYPERVISOR_RW));
 
  out:
     local_irq_restore(flags);
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index b5047db..bf62a88 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -1316,7 +1316,7 @@ void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
         else
         {
             const l4_pgentry_t *l4e =
-                map_domain_page(pagetable_get_pfn(v->arch.guest_table));
+                map_domain_page(_mfn(pagetable_get_pfn(v->arch.guest_table)));
 
             c.cmp->ctrlreg[3] = compat_pfn_to_cr3(l4e_get_pfn(*l4e));
             unmap_domain_page(l4e);
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index df9c190..342414f 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -1174,7 +1174,7 @@ static int alloc_l1_table(struct page_info *page)
     unsigned int   i;
     int            ret = 0;
 
-    pl1e = map_domain_page(pfn);
+    pl1e = map_domain_page(_mfn(pfn));
 
     for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
     {
@@ -1255,7 +1255,7 @@ static int alloc_l2_table(struct page_info *page, unsigned long type,
     unsigned int   i;
     int            rc = 0;
 
-    pl2e = map_domain_page(pfn);
+    pl2e = map_domain_page(_mfn(pfn));
 
     for ( i = page->nr_validated_ptes; i < L2_PAGETABLE_ENTRIES; i++ )
     {
@@ -1304,7 +1304,7 @@ static int alloc_l3_table(struct page_info *page)
     unsigned int   i;
     int            rc = 0, partial = page->partial_pte;
 
-    pl3e = map_domain_page(pfn);
+    pl3e = map_domain_page(_mfn(pfn));
 
     /*
      * PAE guests allocate full pages, but aren't required to initialize
@@ -1396,7 +1396,7 @@ void init_guest_l4_table(l4_pgentry_t l4tab[], const struct domain *d,
 
 void fill_ro_mpt(unsigned long mfn)
 {
-    l4_pgentry_t *l4tab = map_domain_page(mfn);
+    l4_pgentry_t *l4tab = map_domain_page(_mfn(mfn));
 
     l4tab[l4_table_offset(RO_MPT_VIRT_START)] =
         idle_pg_table[l4_table_offset(RO_MPT_VIRT_START)];
@@ -1405,7 +1405,7 @@ void fill_ro_mpt(unsigned long mfn)
 
 void zap_ro_mpt(unsigned long mfn)
 {
-    l4_pgentry_t *l4tab = map_domain_page(mfn);
+    l4_pgentry_t *l4tab = map_domain_page(_mfn(mfn));
 
     l4tab[l4_table_offset(RO_MPT_VIRT_START)] = l4e_empty();
     unmap_domain_page(l4tab);
@@ -1415,7 +1415,7 @@ static int alloc_l4_table(struct page_info *page)
 {
     struct domain *d = page_get_owner(page);
     unsigned long  pfn = page_to_mfn(page);
-    l4_pgentry_t  *pl4e = map_domain_page(pfn);
+    l4_pgentry_t  *pl4e = map_domain_page(_mfn(pfn));
     unsigned int   i;
     int            rc = 0, partial = page->partial_pte;
 
@@ -1471,7 +1471,7 @@ static void free_l1_table(struct page_info *page)
     l1_pgentry_t *pl1e;
     unsigned int  i;
 
-    pl1e = map_domain_page(pfn);
+    pl1e = map_domain_page(_mfn(pfn));
 
     for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
         if ( is_guest_l1_slot(i) )
@@ -1489,7 +1489,7 @@ static int free_l2_table(struct page_info *page, int preemptible)
     unsigned int  i = page->nr_validated_ptes - 1;
     int err = 0;
 
-    pl2e = map_domain_page(pfn);
+    pl2e = map_domain_page(_mfn(pfn));
 
     ASSERT(page->nr_validated_ptes);
     do {
@@ -1518,7 +1518,7 @@ static int free_l3_table(struct page_info *page)
     int rc = 0, partial = page->partial_pte;
     unsigned int  i = page->nr_validated_ptes - !partial;
 
-    pl3e = map_domain_page(pfn);
+    pl3e = map_domain_page(_mfn(pfn));
 
     do {
         if ( is_guest_l3_slot(i) )
@@ -1553,7 +1553,7 @@ static int free_l4_table(struct page_info *page)
 {
     struct domain *d = page_get_owner(page);
     unsigned long pfn = page_to_mfn(page);
-    l4_pgentry_t *pl4e = map_domain_page(pfn);
+    l4_pgentry_t *pl4e = map_domain_page(_mfn(pfn));
     int rc = 0, partial = page->partial_pte;
     unsigned int  i = page->nr_validated_ptes - !partial;
 
@@ -2653,7 +2653,7 @@ int vcpu_destroy_pagetables(struct vcpu *v)
 
     if ( is_pv_32bit_vcpu(v) )
     {
-        l4tab = map_domain_page(mfn);
+        l4tab = map_domain_page(_mfn(mfn));
         mfn = l4e_get_pfn(*l4tab);
     }
 
@@ -2709,7 +2709,7 @@ int new_guest_cr3(unsigned long mfn)
     if ( is_pv_32bit_domain(d) )
     {
         unsigned long gt_mfn = pagetable_get_pfn(curr->arch.guest_table);
-        l4_pgentry_t *pl4e = map_domain_page(gt_mfn);
+        l4_pgentry_t *pl4e = map_domain_page(_mfn(gt_mfn));
 
         rc = paging_mode_refcounts(d)
              ? -EINVAL /* Old code was broken, but what should it be? */
@@ -3768,7 +3768,7 @@ static int create_grant_pte_mapping(
     }
     
     mfn = page_to_mfn(page);
-    va = map_domain_page(mfn);
+    va = map_domain_page(_mfn(mfn));
     va = (void *)((unsigned long)va + ((unsigned long)pte_addr & ~PAGE_MASK));
 
     if ( !page_lock(page) )
@@ -3823,7 +3823,7 @@ static int destroy_grant_pte_mapping(
     }
     
     mfn = page_to_mfn(page);
-    va = map_domain_page(mfn);
+    va = map_domain_page(_mfn(mfn));
     va = (void *)((unsigned long)va + ((unsigned long)addr & ~PAGE_MASK));
 
     if ( !page_lock(page) )
@@ -4501,7 +4501,7 @@ long do_update_descriptor(u64 pa, u64 desc)
     paging_mark_dirty(dom, mfn);
 
     /* All is good so make the update. */
-    gdt_pent = map_domain_page(mfn);
+    gdt_pent = map_domain_page(_mfn(mfn));
     write_atomic((uint64_t *)&gdt_pent[offset], *(uint64_t *)&d);
     unmap_domain_page(gdt_pent);
 
@@ -5039,7 +5039,7 @@ static int ptwr_emulated_update(
     adjust_guest_l1e(nl1e, d);
 
     /* Checked successfully: do the update (write or cmpxchg). */
-    pl1e = map_domain_page(mfn);
+    pl1e = map_domain_page(_mfn(mfn));
     pl1e = (l1_pgentry_t *)((unsigned long)pl1e + (addr & ~PAGE_MASK));
     if ( do_cmpxchg )
     {
@@ -5954,7 +5954,7 @@ int create_perdomain_mapping(struct domain *d, unsigned long va,
         l3tab[l3_table_offset(va)] = l3e_from_page(pg, __PAGE_HYPERVISOR);
     }
     else
-        l2tab = map_domain_page(l3e_get_pfn(l3tab[l3_table_offset(va)]));
+        l2tab = map_domain_page(_mfn(l3e_get_pfn(l3tab[l3_table_offset(va)])));
 
     unmap_domain_page(l3tab);
 
@@ -5996,7 +5996,7 @@ int create_perdomain_mapping(struct domain *d, unsigned long va,
             *pl2e = l2e_from_page(pg, __PAGE_HYPERVISOR);
         }
         else if ( !l1tab )
-            l1tab = map_domain_page(l2e_get_pfn(*pl2e));
+            l1tab = map_domain_page(_mfn(l2e_get_pfn(*pl2e)));
 
         if ( ppg &&
              !(l1e_get_flags(l1tab[l1_table_offset(va)]) & _PAGE_PRESENT) )
@@ -6047,7 +6047,7 @@ void destroy_perdomain_mapping(struct domain *d, unsigned long va,
 
     if ( l3e_get_flags(*pl3e) & _PAGE_PRESENT )
     {
-        const l2_pgentry_t *l2tab = map_domain_page(l3e_get_pfn(*pl3e));
+        const l2_pgentry_t *l2tab = map_domain_page(_mfn(l3e_get_pfn(*pl3e)));
         const l2_pgentry_t *pl2e = l2tab + l2_table_offset(va);
         unsigned int i = l1_table_offset(va);
 
@@ -6055,7 +6055,7 @@ void destroy_perdomain_mapping(struct domain *d, unsigned long va,
         {
             if ( l2e_get_flags(*pl2e) & _PAGE_PRESENT )
             {
-                l1_pgentry_t *l1tab = map_domain_page(l2e_get_pfn(*pl2e));
+                l1_pgentry_t *l1tab = map_domain_page(_mfn(l2e_get_pfn(*pl2e)));
 
                 for ( ; nr && i < L1_PAGETABLE_ENTRIES; --nr, ++i )
                 {
diff --git a/xen/arch/x86/mm/guest_walk.c b/xen/arch/x86/mm/guest_walk.c
index 9c6c74f..30a653d 100644
--- a/xen/arch/x86/mm/guest_walk.c
+++ b/xen/arch/x86/mm/guest_walk.c
@@ -121,7 +121,7 @@ void *map_domain_gfn(struct p2m_domain *p2m, gfn_t gfn, mfn_t *mfn,
     *mfn = _mfn(page_to_mfn(page));
     ASSERT(mfn_valid(mfn_x(*mfn)));
 
-    map = map_domain_page(mfn_x(*mfn));
+    map = map_domain_page(*mfn);
     return map;
 }
 
diff --git a/xen/arch/x86/mm/hap/guest_walk.c b/xen/arch/x86/mm/hap/guest_walk.c
index 381a196..62ab454 100644
--- a/xen/arch/x86/mm/hap/guest_walk.c
+++ b/xen/arch/x86/mm/hap/guest_walk.c
@@ -87,7 +87,7 @@ unsigned long hap_p2m_ga_to_gfn(GUEST_PAGING_LEVELS)(
 
     /* Map the top-level table and call the tree-walker */
     ASSERT(mfn_valid(mfn_x(top_mfn)));
-    top_map = map_domain_page(mfn_x(top_mfn));
+    top_map = map_domain_page(top_mfn);
 #if GUEST_PAGING_LEVELS == 3
     top_map += (cr3 & ~(PAGE_MASK | 31));
 #endif
diff --git a/xen/arch/x86/mm/mem_sharing.c b/xen/arch/x86/mm/mem_sharing.c
index 16e329e..1a01e45 100644
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -1210,8 +1210,8 @@ int __mem_sharing_unshare_page(struct domain *d,
         return -ENOMEM;
     }
 
-    s = map_domain_page(__page_to_mfn(old_page));
-    t = map_domain_page(__page_to_mfn(page));
+    s = map_domain_page(_mfn(__page_to_mfn(old_page)));
+    t = map_domain_page(_mfn(__page_to_mfn(page)));
     memcpy(t, s, PAGE_SIZE);
     unmap_domain_page(s);
     unmap_domain_page(t);
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index a8737be..e7ff739 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -246,7 +246,7 @@ static void ept_free_entry(struct p2m_domain *p2m, ept_entry_t *ept_entry, int l
 
     if ( level > 1 )
     {
-        ept_entry_t *epte = map_domain_page(ept_entry->mfn);
+        ept_entry_t *epte = map_domain_page(_mfn(ept_entry->mfn));
         for ( int i = 0; i < EPT_PAGETABLE_ENTRIES; i++ )
             ept_free_entry(p2m, epte + i, level - 1);
         unmap_domain_page(epte);
@@ -271,7 +271,7 @@ static int ept_split_super_page(struct p2m_domain *p2m, ept_entry_t *ept_entry,
     if ( !ept_set_middle_entry(p2m, &new_ept) )
         return 0;
 
-    table = map_domain_page(new_ept.mfn);
+    table = map_domain_page(_mfn(new_ept.mfn));
     trunk = 1UL << ((level - 1) * EPT_TABLE_ORDER);
 
     for ( int i = 0; i < EPT_PAGETABLE_ENTRIES; i++ )
@@ -359,7 +359,7 @@ static int ept_next_level(struct p2m_domain *p2m, bool_t read_only,
 
     mfn = e.mfn;
     unmap_domain_page(*table);
-    *table = map_domain_page(mfn);
+    *table = map_domain_page(_mfn(mfn));
     *gfn_remainder &= (1UL << shift) - 1;
     return GUEST_TABLE_NORMAL_PAGE;
 }
@@ -372,7 +372,7 @@ static int ept_next_level(struct p2m_domain *p2m, bool_t read_only,
 static bool_t ept_invalidate_emt(mfn_t mfn, bool_t recalc, int level)
 {
     int rc;
-    ept_entry_t *epte = map_domain_page(mfn_x(mfn));
+    ept_entry_t *epte = map_domain_page(mfn);
     unsigned int i;
     bool_t changed = 0;
 
@@ -414,7 +414,7 @@ static int ept_invalidate_emt_range(struct p2m_domain *p2m,
     unsigned int i, index;
     int wrc, rc = 0, ret = GUEST_TABLE_MAP_FAILED;
 
-    table = map_domain_page(pagetable_get_pfn(p2m_get_pagetable(p2m)));
+    table = map_domain_page(_mfn(pagetable_get_pfn(p2m_get_pagetable(p2m))));
     for ( i = ept_get_wl(&p2m->ept); i > target; --i )
     {
         ret = ept_next_level(p2m, 1, &table, &gfn_remainder, i);
@@ -498,7 +498,7 @@ static int resolve_misconfig(struct p2m_domain *p2m, unsigned long gfn)
         ept_entry_t e;
         unsigned int i;
 
-        epte = map_domain_page(mfn);
+        epte = map_domain_page(_mfn(mfn));
         i = (gfn >> (level * EPT_TABLE_ORDER)) & (EPT_PAGETABLE_ENTRIES - 1);
         e = atomic_read_ept_entry(&epte[i]);
 
@@ -689,7 +689,7 @@ ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
            (target == 0));
     ASSERT(!p2m_is_foreign(p2mt) || target == 0);
 
-    table = map_domain_page(pagetable_get_pfn(p2m_get_pagetable(p2m)));
+    table = map_domain_page(_mfn(pagetable_get_pfn(p2m_get_pagetable(p2m))));
 
     ret = GUEST_TABLE_MAP_FAILED;
     for ( i = ept_get_wl(ept); i > target; i-- )
@@ -840,7 +840,7 @@ static mfn_t ept_get_entry(struct p2m_domain *p2m,
                            unsigned long gfn, p2m_type_t *t, p2m_access_t* a,
                            p2m_query_t q, unsigned int *page_order)
 {
-    ept_entry_t *table = map_domain_page(pagetable_get_pfn(p2m_get_pagetable(p2m)));
+    ept_entry_t *table = map_domain_page(_mfn(pagetable_get_pfn(p2m_get_pagetable(p2m))));
     unsigned long gfn_remainder = gfn;
     ept_entry_t *ept_entry;
     u32 index;
@@ -944,7 +944,7 @@ void ept_walk_table(struct domain *d, unsigned long gfn)
 {
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
     struct ept_data *ept = &p2m->ept;
-    ept_entry_t *table = map_domain_page(pagetable_get_pfn(p2m_get_pagetable(p2m)));
+    ept_entry_t *table = map_domain_page(_mfn(pagetable_get_pfn(p2m_get_pagetable(p2m))));
     unsigned long gfn_remainder = gfn;
 
     int i;
@@ -977,7 +977,7 @@ void ept_walk_table(struct domain *d, unsigned long gfn)
         {
             gfn_remainder &= (1UL << (i*EPT_TABLE_ORDER)) - 1;
 
-            next = map_domain_page(ept_entry->mfn);
+            next = map_domain_page(_mfn(ept_entry->mfn));
 
             unmap_domain_page(table);
 
@@ -1188,7 +1188,7 @@ static void ept_dump_p2m_table(unsigned char key)
             char c = 0;
 
             gfn_remainder = gfn;
-            table = map_domain_page(pagetable_get_pfn(p2m_get_pagetable(p2m)));
+            table = map_domain_page(_mfn(pagetable_get_pfn(p2m_get_pagetable(p2m))));
 
             for ( i = ept_get_wl(ept); i > 0; i-- )
             {
diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
index 0679f00..6e27bcd 100644
--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -109,7 +109,7 @@ p2m_pod_cache_add(struct p2m_domain *p2m,
      */
     for ( i = 0; i < (1 << order); i++ )
     {
-        char *b = map_domain_page(mfn_x(page_to_mfn(page)) + i);
+        char *b = map_domain_page(_mfn(mfn_x(page_to_mfn(page)) + i));
         clear_page(b);
         unmap_domain_page(b);
     }
@@ -710,7 +710,7 @@ p2m_pod_zero_check_superpage(struct p2m_domain *p2m, unsigned long gfn)
     for ( i=0; i<SUPERPAGE_PAGES; i++ )
     {
         /* Quick zero-check */
-        map = map_domain_page(mfn_x(mfn0) + i);
+        map = map_domain_page(_mfn(mfn_x(mfn0) + i));
 
         for ( j=0; j<16; j++ )
             if( *(map+j) != 0 )
@@ -743,7 +743,7 @@ p2m_pod_zero_check_superpage(struct p2m_domain *p2m, unsigned long gfn)
     /* Finally, do a full zero-check */
     for ( i=0; i < SUPERPAGE_PAGES; i++ )
     {
-        map = map_domain_page(mfn_x(mfn0) + i);
+        map = map_domain_page(_mfn(mfn_x(mfn0) + i));
 
         for ( j=0; j<PAGE_SIZE/sizeof(*map); j++ )
             if( *(map+j) != 0 )
@@ -815,7 +815,7 @@ p2m_pod_zero_check(struct p2m_domain *p2m, unsigned long *gfns, int count)
             && ( (mfn_to_page(mfns[i])->count_info & PGC_allocated) != 0 )
             && ( (mfn_to_page(mfns[i])->count_info & (PGC_page_table|PGC_xen_heap)) == 0 )
             && ( (mfn_to_page(mfns[i])->count_info & PGC_count_mask) <= max_ref ) )
-            map[i] = map_domain_page(mfn_x(mfns[i]));
+            map[i] = map_domain_page(mfns[i]);
         else
             map[i] = NULL;
     }
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index e50b6fa..a6dd464 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -146,7 +146,7 @@ p2m_free_entry(struct p2m_domain *p2m, l1_pgentry_t *p2m_entry, int page_order)
 
     if ( page_order > PAGE_ORDER_2M )
     {
-        l1_pgentry_t *l3_table = map_domain_page(l1e_get_pfn(*p2m_entry));
+        l1_pgentry_t *l3_table = map_domain_page(_mfn(l1e_get_pfn(*p2m_entry)));
         for ( int i = 0; i < L3_PAGETABLE_ENTRIES; i++ )
             p2m_free_entry(p2m, l3_table + i, page_order - 9);
         unmap_domain_page(l3_table);
@@ -280,7 +280,7 @@ p2m_next_level(struct p2m_domain *p2m, void **table,
         p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, 2);
     }
 
-    next = map_domain_page(l1e_get_pfn(*p2m_entry));
+    next = map_domain_page(_mfn(l1e_get_pfn(*p2m_entry)));
     if ( unmap )
         unmap_domain_page(*table);
     *table = next;
@@ -304,7 +304,7 @@ static int p2m_pt_set_recalc_range(struct p2m_domain *p2m,
     l1_pgentry_t *pent, *plast;
     int err = 0;
 
-    table = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
+    table = map_domain_page(pagetable_get_mfn(p2m_get_pagetable(p2m)));
     for ( i = 4; i-- > level; )
     {
         remainder = gfn_remainder;
@@ -366,7 +366,7 @@ static int do_recalc(struct p2m_domain *p2m, unsigned long gfn)
     l1_pgentry_t *pent;
     int err = 0;
 
-    table = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
+    table = map_domain_page(pagetable_get_mfn(p2m_get_pagetable(p2m)));
     while ( --level )
     {
         unsigned long remainder = gfn_remainder;
@@ -524,7 +524,7 @@ p2m_pt_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
     if ( rc < 0 )
         return rc;
 
-    table = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
+    table = map_domain_page(pagetable_get_mfn(p2m_get_pagetable(p2m)));
     rc = p2m_next_level(p2m, &table, &gfn_remainder, gfn,
                         L4_PAGETABLE_SHIFT - PAGE_SHIFT,
                         L4_PAGETABLE_ENTRIES, PGT_l3_page_table, 1);
@@ -716,7 +716,7 @@ p2m_pt_get_entry(struct p2m_domain *p2m, unsigned long gfn,
     mfn = pagetable_get_mfn(p2m_get_pagetable(p2m));
 
     {
-        l4_pgentry_t *l4e = map_domain_page(mfn_x(mfn));
+        l4_pgentry_t *l4e = map_domain_page(mfn);
         l4e += l4_table_offset(addr);
         if ( (l4e_get_flags(*l4e) & _PAGE_PRESENT) == 0 )
         {
@@ -728,7 +728,7 @@ p2m_pt_get_entry(struct p2m_domain *p2m, unsigned long gfn,
         unmap_domain_page(l4e);
     }
     {
-        l3_pgentry_t *l3e = map_domain_page(mfn_x(mfn));
+        l3_pgentry_t *l3e = map_domain_page(mfn);
         l3e += l3_table_offset(addr);
 pod_retry_l3:
         flags = l3e_get_flags(*l3e);
@@ -769,7 +769,7 @@ pod_retry_l3:
         unmap_domain_page(l3e);
     }
 
-    l2e = map_domain_page(mfn_x(mfn));
+    l2e = map_domain_page(mfn);
     l2e += l2_table_offset(addr);
 
 pod_retry_l2:
@@ -807,7 +807,7 @@ pod_retry_l2:
         recalc = 1;
     unmap_domain_page(l2e);
 
-    l1e = map_domain_page(mfn_x(mfn));
+    l1e = map_domain_page(mfn);
     l1e += l1_table_offset(addr);
 pod_retry_l1:
     flags = l1e_get_flags(*l1e);
@@ -849,7 +849,7 @@ static void p2m_pt_change_entry_type_global(struct p2m_domain *p2m,
 
     ASSERT(hap_enabled(p2m->domain));
 
-    tab = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
+    tab = map_domain_page(pagetable_get_mfn(p2m_get_pagetable(p2m)));
     for ( changed = i = 0; i < (1 << PAGETABLE_ORDER); ++i )
     {
         l1_pgentry_t e = tab[i];
@@ -929,7 +929,7 @@ long p2m_pt_audit_p2m(struct p2m_domain *p2m)
         l4_pgentry_t *l4e;
         l3_pgentry_t *l3e;
         int i4, i3;
-        l4e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
+        l4e = map_domain_page(pagetable_get_mfn(p2m_get_pagetable(p2m)));
 
         gfn = 0;
         for ( i4 = 0; i4 < L4_PAGETABLE_ENTRIES; i4++ )
@@ -939,7 +939,7 @@ long p2m_pt_audit_p2m(struct p2m_domain *p2m)
                 gfn += 1 << (L4_PAGETABLE_SHIFT - PAGE_SHIFT);
                 continue;
             }
-            l3e = map_domain_page(l4e_get_pfn(l4e[i4]));
+            l3e = map_domain_page(_mfn(l4e_get_pfn(l4e[i4])));
             for ( i3 = 0;
                   i3 < L3_PAGETABLE_ENTRIES;
                   i3++ )
@@ -974,7 +974,7 @@ long p2m_pt_audit_p2m(struct p2m_domain *p2m)
                     }
                 }
 
-                l2e = map_domain_page(l3e_get_pfn(l3e[i3]));
+                l2e = map_domain_page(_mfn(l3e_get_pfn(l3e[i3])));
                 for ( i2 = 0; i2 < L2_PAGETABLE_ENTRIES; i2++ )
                 {
                     if ( !(l2e_get_flags(l2e[i2]) & _PAGE_PRESENT) )
@@ -1010,7 +1010,7 @@ long p2m_pt_audit_p2m(struct p2m_domain *p2m)
                         continue;
                     }
 
-                    l1e = map_domain_page(l2e_get_pfn(l2e[i2]));
+                    l1e = map_domain_page(_mfn(l2e_get_pfn(l2e[i2])));
 
                     for ( i1 = 0; i1 < L1_PAGETABLE_ENTRIES; i1++, gfn++ )
                     {
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 64ffeeb..4fa3cd8 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1285,7 +1285,7 @@ int p2m_mem_paging_prep(struct domain *d, unsigned long gfn, uint64_t buffer)
         int rc;
 
         ASSERT( mfn_valid(mfn) );
-        guest_map = map_domain_page(mfn_x(mfn));
+        guest_map = map_domain_page(mfn);
         rc = copy_from_user(guest_map, user_ptr, PAGE_SIZE);
         unmap_domain_page(guest_map);
         if ( rc )
diff --git a/xen/arch/x86/mm/paging.c b/xen/arch/x86/mm/paging.c
index 59d4720..7089155 100644
--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -81,7 +81,7 @@ static mfn_t paging_new_log_dirty_leaf(struct domain *d)
     mfn_t mfn = paging_new_log_dirty_page(d);
     if ( mfn_valid(mfn) )
     {
-        void *leaf = map_domain_page(mfn_x(mfn));
+        void *leaf = map_domain_page(mfn);
         clear_page(leaf);
         unmap_domain_page(leaf);
     }
@@ -95,7 +95,7 @@ static mfn_t paging_new_log_dirty_node(struct domain *d)
     if ( mfn_valid(mfn) )
     {
         int i;
-        mfn_t *node = map_domain_page(mfn_x(mfn));
+        mfn_t *node = map_domain_page(mfn);
         for ( i = 0; i < LOGDIRTY_NODE_ENTRIES; i++ )
             node[i] = _mfn(INVALID_MFN);
         unmap_domain_page(node);
@@ -107,7 +107,7 @@ static mfn_t paging_new_log_dirty_node(struct domain *d)
 static mfn_t *paging_map_log_dirty_bitmap(struct domain *d)
 {
     if ( likely(mfn_valid(d->arch.paging.log_dirty.top)) )
-        return map_domain_page(mfn_x(d->arch.paging.log_dirty.top));
+        return map_domain_page(d->arch.paging.log_dirty.top);
     return NULL;
 }
 
@@ -144,7 +144,7 @@ static int paging_free_log_dirty_bitmap(struct domain *d, int rc)
         return -EBUSY;
     }
 
-    l4 = map_domain_page(mfn_x(d->arch.paging.log_dirty.top));
+    l4 = map_domain_page(d->arch.paging.log_dirty.top);
     i4 = d->arch.paging.preempt.log_dirty.i4;
     i3 = d->arch.paging.preempt.log_dirty.i3;
     rc = 0;
@@ -154,14 +154,14 @@ static int paging_free_log_dirty_bitmap(struct domain *d, int rc)
         if ( !mfn_valid(l4[i4]) )
             continue;
 
-        l3 = map_domain_page(mfn_x(l4[i4]));
+        l3 = map_domain_page(l4[i4]);
 
         for ( ; i3 < LOGDIRTY_NODE_ENTRIES; i3++ )
         {
             if ( !mfn_valid(l3[i3]) )
                 continue;
 
-            l2 = map_domain_page(mfn_x(l3[i3]));
+            l2 = map_domain_page(l3[i3]);
 
             for ( i2 = 0; i2 < LOGDIRTY_NODE_ENTRIES; i2++ )
                 if ( mfn_valid(l2[i2]) )
@@ -311,7 +311,7 @@ void paging_mark_gfn_dirty(struct domain *d, unsigned long pfn)
     if ( !mfn_valid(mfn) )
         goto out;
 
-    l3 = map_domain_page(mfn_x(mfn));
+    l3 = map_domain_page(mfn);
     mfn = l3[i3];
     if ( !mfn_valid(mfn) )
         l3[i3] = mfn = paging_new_log_dirty_node(d);
@@ -319,7 +319,7 @@ void paging_mark_gfn_dirty(struct domain *d, unsigned long pfn)
     if ( !mfn_valid(mfn) )
         goto out;
 
-    l2 = map_domain_page(mfn_x(mfn));
+    l2 = map_domain_page(mfn);
     mfn = l2[i2];
     if ( !mfn_valid(mfn) )
         l2[i2] = mfn = paging_new_log_dirty_leaf(d);
@@ -327,7 +327,7 @@ void paging_mark_gfn_dirty(struct domain *d, unsigned long pfn)
     if ( !mfn_valid(mfn) )
         goto out;
 
-    l1 = map_domain_page(mfn_x(mfn));
+    l1 = map_domain_page(mfn);
     changed = !__test_and_set_bit(i1, l1);
     unmap_domain_page(l1);
     if ( changed )
@@ -384,25 +384,25 @@ int paging_mfn_is_dirty(struct domain *d, mfn_t gmfn)
     if ( !mfn_valid(mfn) )
         return 0;
 
-    l4 = map_domain_page(mfn_x(mfn));
+    l4 = map_domain_page(mfn);
     mfn = l4[L4_LOGDIRTY_IDX(pfn)];
     unmap_domain_page(l4);
     if ( !mfn_valid(mfn) )
         return 0;
 
-    l3 = map_domain_page(mfn_x(mfn));
+    l3 = map_domain_page(mfn);
     mfn = l3[L3_LOGDIRTY_IDX(pfn)];
     unmap_domain_page(l3);
     if ( !mfn_valid(mfn) )
         return 0;
 
-    l2 = map_domain_page(mfn_x(mfn));
+    l2 = map_domain_page(mfn);
     mfn = l2[L2_LOGDIRTY_IDX(pfn)];
     unmap_domain_page(l2);
     if ( !mfn_valid(mfn) )
         return 0;
 
-    l1 = map_domain_page(mfn_x(mfn));
+    l1 = map_domain_page(mfn);
     rv = test_bit(L1_LOGDIRTY_IDX(pfn), l1);
     unmap_domain_page(l1);
     return rv;
@@ -476,18 +476,18 @@ static int paging_log_dirty_op(struct domain *d,
 
     for ( ; (pages < sc->pages) && (i4 < LOGDIRTY_NODE_ENTRIES); i4++, i3 = 0 )
     {
-        l3 = (l4 && mfn_valid(l4[i4])) ? map_domain_page(mfn_x(l4[i4])) : NULL;
+        l3 = (l4 && mfn_valid(l4[i4])) ? map_domain_page(l4[i4]) : NULL;
         for ( ; (pages < sc->pages) && (i3 < LOGDIRTY_NODE_ENTRIES); i3++ )
         {
             l2 = ((l3 && mfn_valid(l3[i3])) ?
-                  map_domain_page(mfn_x(l3[i3])) : NULL);
+                  map_domain_page(l3[i3]) : NULL);
             for ( i2 = 0;
                   (pages < sc->pages) && (i2 < LOGDIRTY_NODE_ENTRIES);
                   i2++ )
             {
                 unsigned int bytes = PAGE_SIZE;
                 l1 = ((l2 && mfn_valid(l2[i2])) ?
-                      map_domain_page(mfn_x(l2[i2])) : NULL);
+                      map_domain_page(l2[i2]) : NULL);
                 if ( unlikely(((sc->pages - pages + 7) >> 3) < bytes) )
                     bytes = (unsigned int)((sc->pages - pages + 7) >> 3);
                 if ( likely(peek) )
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 97de133..c36ffeb 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -3393,7 +3393,7 @@ static void sh_unshadow_for_p2m_change(struct domain *d, unsigned long gfn,
             if ( (l1e_get_flags(new) & _PAGE_PRESENT)
                  && !(l1e_get_flags(new) & _PAGE_PSE)
                  && mfn_valid(nmfn) )
-                npte = map_domain_page(mfn_x(nmfn));
+                npte = map_domain_page(nmfn);
 
             for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
             {
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 19644d2..0a942f8 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -256,7 +256,7 @@ shadow_check_gl1e(struct vcpu *v, walk_t *gw)
         return 0;
 
     /* Can't just pull-through because mfn may have changed */
-    l1p = map_domain_page(mfn_x(gw->l1mfn));
+    l1p = map_domain_page(gw->l1mfn);
     nl1e.l1 = l1p[guest_l1_table_offset(gw->va)].l1;
     unmap_domain_page(l1p);
 
@@ -384,7 +384,7 @@ sh_guest_map_l1e(struct vcpu *v, unsigned long addr,
     {
         if ( gl1mfn )
             *gl1mfn = mfn_x(gw.l1mfn);
-        pl1e = map_domain_page(mfn_x(gw.l1mfn)) +
+        pl1e = map_domain_page(gw.l1mfn) +
             (guest_l1_table_offset(addr) * sizeof(guest_l1e_t));
     }
 
diff --git a/xen/arch/x86/mm/shadow/private.h b/xen/arch/x86/mm/shadow/private.h
index eff39dc..31b36ef 100644
--- a/xen/arch/x86/mm/shadow/private.h
+++ b/xen/arch/x86/mm/shadow/private.h
@@ -508,7 +508,7 @@ sh_mfn_is_a_page_table(mfn_t gmfn)
 static inline void *
 sh_map_domain_page(mfn_t mfn)
 {
-    return map_domain_page(mfn_x(mfn));
+    return map_domain_page(mfn);
 }
 
 static inline void
diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
index c73aa1b..fd61610 100644
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -668,7 +668,7 @@ static void cpu_smpboot_free(unsigned int cpu)
     if ( per_cpu(stubs.addr, cpu) )
     {
         unsigned long mfn = per_cpu(stubs.mfn, cpu);
-        unsigned char *stub_page = map_domain_page(mfn);
+        unsigned char *stub_page = map_domain_page(_mfn(mfn));
         unsigned int i;
 
         memset(stub_page + STUB_BUF_CPU_OFFS(cpu), 0xcc, STUB_BUF_SIZE);
diff --git a/xen/arch/x86/tboot.c b/xen/arch/x86/tboot.c
index 01b9530..88142d2 100644
--- a/xen/arch/x86/tboot.c
+++ b/xen/arch/x86/tboot.c
@@ -161,7 +161,7 @@ static void update_iommu_mac(vmac_ctx_t *ctx, uint64_t pt_maddr, int level)
     if ( pt_maddr == 0 )
         return;
 
-    pt_vaddr = (struct dma_pte *)map_domain_page(pt_maddr >> PAGE_SHIFT_4K);
+    pt_vaddr = (struct dma_pte *)map_domain_page(_mfn(paddr_to_pfn(pt_maddr)));
     vmac_update((void *)pt_vaddr, PAGE_SIZE, ctx);
 
     for ( i = 0; i < PTE_NUM; i++ )
@@ -194,7 +194,8 @@ static void update_pagetable_mac(vmac_ctx_t *ctx)
         {
             if ( page->count_info & PGC_page_table )
             {
-                void *pg = map_domain_page(mfn);
+                void *pg = map_domain_page(_mfn(mfn));
+
                 vmac_update(pg, PAGE_SIZE, ctx);
                 unmap_domain_page(pg);
             }
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index c07bbae..2dc0666 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -1325,7 +1325,7 @@ static enum pf_type __page_fault_type(
 
     mfn = cr3 >> PAGE_SHIFT;
 
-    l4t = map_domain_page(mfn);
+    l4t = map_domain_page(_mfn(mfn));
     l4e = l4e_read_atomic(&l4t[l4_table_offset(addr)]);
     mfn = l4e_get_pfn(l4e);
     unmap_domain_page(l4t);
@@ -1334,7 +1334,7 @@ static enum pf_type __page_fault_type(
         return real_fault;
     page_user &= l4e_get_flags(l4e);
 
-    l3t  = map_domain_page(mfn);
+    l3t  = map_domain_page(_mfn(mfn));
     l3e = l3e_read_atomic(&l3t[l3_table_offset(addr)]);
     mfn = l3e_get_pfn(l3e);
     unmap_domain_page(l3t);
@@ -1345,7 +1345,7 @@ static enum pf_type __page_fault_type(
     if ( l3e_get_flags(l3e) & _PAGE_PSE )
         goto leaf;
 
-    l2t = map_domain_page(mfn);
+    l2t = map_domain_page(_mfn(mfn));
     l2e = l2e_read_atomic(&l2t[l2_table_offset(addr)]);
     mfn = l2e_get_pfn(l2e);
     unmap_domain_page(l2t);
@@ -1356,7 +1356,7 @@ static enum pf_type __page_fault_type(
     if ( l2e_get_flags(l2e) & _PAGE_PSE )
         goto leaf;
 
-    l1t = map_domain_page(mfn);
+    l1t = map_domain_page(_mfn(mfn));
     l1e = l1e_read_atomic(&l1t[l1_table_offset(addr)]);
     mfn = l1e_get_pfn(l1e);
     unmap_domain_page(l1t);
@@ -2201,7 +2201,7 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
      * context. This is needed for some systems which (ab)use IN/OUT
      * to communicate with BIOS code in system-management mode.
      */
-    io_emul_stub = map_domain_page(this_cpu(stubs.mfn)) +
+    io_emul_stub = map_domain_page(_mfn(this_cpu(stubs.mfn))) +
                    (this_cpu(stubs.addr) & ~PAGE_MASK) +
                    STUB_BUF_SIZE / 2;
     /* movq $host_to_guest_gpr_switch,%rcx */
@@ -2397,7 +2397,7 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
             else
             {
                 l4_pgentry_t *pl4e =
-                    map_domain_page(pagetable_get_pfn(v->arch.guest_table));
+                    map_domain_page(_mfn(pagetable_get_pfn(v->arch.guest_table)));
 
                 mfn = l4e_get_pfn(*pl4e);
                 unmap_domain_page(pl4e);
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index db5346c..98310f3 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -59,7 +59,7 @@ void *do_page_walk(struct vcpu *v, unsigned long addr)
     if ( !is_pv_vcpu(v) || !is_canonical_address(addr) )
         return NULL;
 
-    l4t = map_domain_page(mfn);
+    l4t = map_domain_page(_mfn(mfn));
     l4e = l4t[l4_table_offset(addr)];
     unmap_domain_page(l4t);
     if ( !(l4e_get_flags(l4e) & _PAGE_PRESENT) )
@@ -77,7 +77,7 @@ void *do_page_walk(struct vcpu *v, unsigned long addr)
         goto ret;
     }
 
-    l2t = map_domain_page(mfn);
+    l2t = map_domain_page(_mfn(mfn));
     l2e = l2t[l2_table_offset(addr)];
     unmap_domain_page(l2t);
     mfn = l2e_get_pfn(l2e);
@@ -89,7 +89,7 @@ void *do_page_walk(struct vcpu *v, unsigned long addr)
         goto ret;
     }
 
-    l1t = map_domain_page(mfn);
+    l1t = map_domain_page(_mfn(mfn));
     l1e = l1t[l1_table_offset(addr)];
     unmap_domain_page(l1t);
     mfn = l1e_get_pfn(l1e);
@@ -97,7 +97,7 @@ void *do_page_walk(struct vcpu *v, unsigned long addr)
         return NULL;
 
  ret:
-    return map_domain_page(mfn) + (addr & ~PAGE_MASK);
+    return map_domain_page(_mfn(mfn)) + (addr & ~PAGE_MASK);
 }
 
 /*
@@ -1197,7 +1197,7 @@ int handle_memadd_fault(unsigned long addr, struct cpu_user_regs *regs)
 
     mfn = (read_cr3()) >> PAGE_SHIFT;
 
-    pl4e = map_domain_page(mfn);
+    pl4e = map_domain_page(_mfn(mfn));
 
     l4e = pl4e[0];
 
@@ -1206,7 +1206,7 @@ int handle_memadd_fault(unsigned long addr, struct cpu_user_regs *regs)
 
     mfn = l4e_get_pfn(l4e);
     /* We don't need get page type here since it is current CR3 */
-    pl3e = map_domain_page(mfn);
+    pl3e = map_domain_page(_mfn(mfn));
 
     l3e = pl3e[3];
 
@@ -1214,7 +1214,7 @@ int handle_memadd_fault(unsigned long addr, struct cpu_user_regs *regs)
         goto unmap;
 
     mfn = l3e_get_pfn(l3e);
-    pl2e = map_domain_page(mfn);
+    pl2e = map_domain_page(_mfn(mfn));
 
     l2e = pl2e[l2_table_offset(addr)];
 
diff --git a/xen/arch/x86/x86_64/traps.c b/xen/arch/x86/x86_64/traps.c
index 61bd053..0846a19 100644
--- a/xen/arch/x86/x86_64/traps.c
+++ b/xen/arch/x86/x86_64/traps.c
@@ -175,7 +175,7 @@ void show_page_walk(unsigned long addr)
     if ( !is_canonical_address(addr) )
         return;
 
-    l4t = map_domain_page(mfn);
+    l4t = map_domain_page(_mfn(mfn));
     l4e = l4t[l4_table_offset(addr)];
     unmap_domain_page(l4t);
     mfn = l4e_get_pfn(l4e);
@@ -187,7 +187,7 @@ void show_page_walk(unsigned long addr)
          !mfn_valid(mfn) )
         return;
 
-    l3t = map_domain_page(mfn);
+    l3t = map_domain_page(_mfn(mfn));
     l3e = l3t[l3_table_offset(addr)];
     unmap_domain_page(l3t);
     mfn = l3e_get_pfn(l3e);
@@ -201,7 +201,7 @@ void show_page_walk(unsigned long addr)
          !mfn_valid(mfn) )
         return;
 
-    l2t = map_domain_page(mfn);
+    l2t = map_domain_page(_mfn(mfn));
     l2e = l2t[l2_table_offset(addr)];
     unmap_domain_page(l2t);
     mfn = l2e_get_pfn(l2e);
@@ -215,7 +215,7 @@ void show_page_walk(unsigned long addr)
          !mfn_valid(mfn) )
         return;
 
-    l1t = map_domain_page(mfn);
+    l1t = map_domain_page(_mfn(mfn));
     l1e = l1t[l1_table_offset(addr)];
     unmap_domain_page(l1t);
     mfn = l1e_get_pfn(l1e);
@@ -381,7 +381,7 @@ void __devinit subarch_percpu_traps_init(void)
     /* IST_MAX IST pages + 1 syscall page + 1 guard page + primary stack. */
     BUILD_BUG_ON((IST_MAX + 2) * PAGE_SIZE + PRIMARY_STACK_SIZE > STACK_SIZE);
 
-    stub_page = map_domain_page(this_cpu(stubs.mfn));
+    stub_page = map_domain_page(_mfn(this_cpu(stubs.mfn)));
 
     /* Trampoline for SYSCALL entry from 64-bit mode. */
     wrmsrl(MSR_LSTAR, stub_va);
diff --git a/xen/arch/x86/x86_emulate.c b/xen/arch/x86/x86_emulate.c
index 51c8e44..28132b5 100644
--- a/xen/arch/x86/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate.c
@@ -23,11 +23,11 @@
 #define cpu_has_amd_erratum(nr) \
         cpu_has_amd_erratum(&current_cpu_data, AMD_ERRATUM_##nr)
 
-#define get_stub(stb) ({                                   \
-    BUILD_BUG_ON(STUB_BUF_SIZE / 2 < MAX_INST_LEN + 1);    \
-    (stb).addr = this_cpu(stubs.addr) + STUB_BUF_SIZE / 2; \
-    ((stb).ptr = map_domain_page(this_cpu(stubs.mfn))) +   \
-        ((stb).addr & ~PAGE_MASK);                         \
+#define get_stub(stb) ({                                        \
+    BUILD_BUG_ON(STUB_BUF_SIZE / 2 < MAX_INST_LEN + 1);         \
+    (stb).addr = this_cpu(stubs.addr) + STUB_BUF_SIZE / 2;      \
+    ((stb).ptr = map_domain_page(_mfn(this_cpu(stubs.mfn)))) +  \
+        ((stb).addr & ~PAGE_MASK);                              \
 })
 #define put_stub(stb) ({                                   \
     if ( (stb).ptr )                                       \
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index 681a553..92f078e 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -2427,7 +2427,7 @@ static int gnttab_copy_claim_buf(const struct gnttab_copy *op,
         buf->have_type = 1;
     }
 
-    buf->virt = map_domain_page(buf->frame);
+    buf->virt = map_domain_page(_mfn(buf->frame));
     rc = GNTST_okay;
 
  out:
@@ -2945,7 +2945,7 @@ static int __gnttab_cache_flush(gnttab_cache_flush_t *cflush,
         }
     }
 
-    v = map_domain_page(mfn);
+    v = map_domain_page(_mfn(mfn));
     v += cflush->offset;
 
     if ( (cflush->op & GNTTAB_CACHE_INVAL) && (cflush->op & GNTTAB_CACHE_CLEAN) )
diff --git a/xen/common/kexec.c b/xen/common/kexec.c
index 7d91547..7dd2700 100644
--- a/xen/common/kexec.c
+++ b/xen/common/kexec.c
@@ -912,7 +912,7 @@ static int kexec_segments_from_ind_page(unsigned long mfn,
     kimage_entry_t *entry;
     int ret = 0;
 
-    page = map_domain_page(mfn);
+    page = map_domain_page(_mfn(mfn));
 
     /*
      * Walk the indirection page list, adding destination pages to the
@@ -934,7 +934,7 @@ static int kexec_segments_from_ind_page(unsigned long mfn,
             break;
         case IND_INDIRECTION:
             unmap_domain_page(page);
-            entry = page = map_domain_page(mfn);
+            entry = page = map_domain_page(_mfn(mfn));
             continue;
         case IND_DONE:
             goto done;
diff --git a/xen/common/kimage.c b/xen/common/kimage.c
index 742e4e8..dcc010e 100644
--- a/xen/common/kimage.c
+++ b/xen/common/kimage.c
@@ -495,10 +495,10 @@ static void kimage_terminate(struct kexec_image *image)
  * Call unmap_domain_page(ptr) after the loop exits.
  */
 #define for_each_kimage_entry(image, ptr, entry)                        \
-    for ( ptr = map_domain_page(image->head >> PAGE_SHIFT);             \
+    for ( ptr = map_domain_page(_mfn(paddr_to_pfn(image->head)));       \
           (entry = *ptr) && !(entry & IND_DONE);                        \
           ptr = (entry & IND_INDIRECTION) ?                             \
-              (unmap_domain_page(ptr), map_domain_page(entry >> PAGE_SHIFT)) \
+              (unmap_domain_page(ptr), map_domain_page(_mfn(paddr_to_pfn(entry)))) \
               : ptr + 1 )
 
 static void kimage_free_entry(kimage_entry_t entry)
@@ -748,7 +748,7 @@ static int kimage_load_crash_segment(struct kexec_image *image,
         dchunk = PAGE_SIZE;
         schunk = min(dchunk, sbytes);
 
-        dest_va = map_domain_page(dest_mfn);
+        dest_va = map_domain_page(_mfn(dest_mfn));
         if ( !dest_va )
             return -EINVAL;
 
@@ -866,7 +866,7 @@ int kimage_build_ind(struct kexec_image *image, unsigned long ind_mfn,
     int ret = 0;
     paddr_t dest = KIMAGE_NO_DEST;
 
-    page = map_domain_page(ind_mfn);
+    page = map_domain_page(_mfn(ind_mfn));
     if ( !page )
         return -ENOMEM;
 
@@ -892,7 +892,7 @@ int kimage_build_ind(struct kexec_image *image, unsigned long ind_mfn,
             break;
         case IND_INDIRECTION:
             unmap_domain_page(page);
-            page = map_domain_page(mfn);
+            page = map_domain_page(_mfn(mfn));
             entry = page;
             continue;
         case IND_DONE:
diff --git a/xen/common/memory.c b/xen/common/memory.c
index ae4c32e..e5d49d8 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -1172,7 +1172,7 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
 
 void clear_domain_page(mfn_t mfn)
 {
-    void *ptr = map_domain_page(mfn_x(mfn));
+    void *ptr = map_domain_page(mfn);
 
     clear_page(ptr);
     unmap_domain_page(ptr);
@@ -1180,8 +1180,8 @@ void clear_domain_page(mfn_t mfn)
 
 void copy_domain_page(mfn_t dest, mfn_t source)
 {
-    const void *src = map_domain_page(mfn_x(source));
-    void *dst = map_domain_page(mfn_x(dest));
+    const void *src = map_domain_page(source);
+    void *dst = map_domain_page(dest);
 
     copy_page(dst, src);
     unmap_domain_page(dst);
diff --git a/xen/common/tmem_xen.c b/xen/common/tmem_xen.c
index 5ef131b..71cb7d5 100644
--- a/xen/common/tmem_xen.c
+++ b/xen/common/tmem_xen.c
@@ -77,7 +77,7 @@ static inline void *cli_get_page(xen_pfn_t cmfn, unsigned long *pcli_mfn,
 
     *pcli_mfn = page_to_mfn(page);
     *pcli_pfp = page;
-    return map_domain_page(*pcli_mfn);
+    return map_domain_page(_mfn(*pcli_mfn));
 }
 
 static inline void cli_put_page(void *cli_va, struct page_info *cli_pfp,
@@ -104,7 +104,7 @@ int tmem_copy_from_client(struct page_info *pfp,
 
     ASSERT(pfp != NULL);
     tmem_mfn = page_to_mfn(pfp);
-    tmem_va = map_domain_page(tmem_mfn);
+    tmem_va = map_domain_page(_mfn(tmem_mfn));
     if ( guest_handle_is_null(clibuf) )
     {
         cli_va = cli_get_page(cmfn, &cli_mfn, &cli_pfp, 0);
@@ -174,7 +174,7 @@ int tmem_copy_to_client(xen_pfn_t cmfn, struct page_info *pfp,
             return -EFAULT;
     }
     tmem_mfn = page_to_mfn(pfp);
-    tmem_va = map_domain_page(tmem_mfn);
+    tmem_va = map_domain_page(_mfn(tmem_mfn));
     if ( cli_va )
     {
         memcpy(cli_va, tmem_va, PAGE_SIZE);
diff --git a/xen/drivers/passthrough/amd/iommu_guest.c b/xen/drivers/passthrough/amd/iommu_guest.c
index 7b0c102..b513073 100644
--- a/xen/drivers/passthrough/amd/iommu_guest.c
+++ b/xen/drivers/passthrough/amd/iommu_guest.c
@@ -203,7 +203,7 @@ void guest_iommu_add_ppr_log(struct domain *d, u32 entry[])
                                     sizeof(ppr_entry_t), tail);
     ASSERT(mfn_valid(mfn));
 
-    log_base = map_domain_page(mfn);
+    log_base = map_domain_page(_mfn(mfn));
     log = log_base + tail % (PAGE_SIZE / sizeof(ppr_entry_t));
 
     /* Convert physical device id back into virtual device id */
@@ -252,7 +252,7 @@ void guest_iommu_add_event_log(struct domain *d, u32 entry[])
                                     sizeof(event_entry_t), tail);
     ASSERT(mfn_valid(mfn));
 
-    log_base = map_domain_page(mfn);
+    log_base = map_domain_page(_mfn(mfn));
     log = log_base + tail % (PAGE_SIZE / sizeof(event_entry_t));
 
     /* re-write physical device id into virtual device id */
@@ -377,7 +377,7 @@ static int do_completion_wait(struct domain *d, cmd_entry_t *cmd)
         gaddr_64 = (gaddr_hi << 32) | (gaddr_lo << 3);
 
         gfn = gaddr_64 >> PAGE_SHIFT;
-        vaddr = map_domain_page(mfn_x(get_gfn(d, gfn ,&p2mt)));
+        vaddr = map_domain_page(get_gfn(d, gfn, &p2mt));
         put_gfn(d, gfn);
 
         write_u64_atomic((uint64_t *)(vaddr + (gaddr_64 & (PAGE_SIZE-1))),
@@ -425,7 +425,7 @@ static int do_invalidate_dte(struct domain *d, cmd_entry_t *cmd)
     ASSERT(mfn_valid(dte_mfn));
 
     /* Read guest dte information */
-    dte_base = map_domain_page(dte_mfn);
+    dte_base = map_domain_page(_mfn(dte_mfn));
 
     gdte = dte_base + gbdf % (PAGE_SIZE / sizeof(dev_entry_t));
 
@@ -506,7 +506,7 @@ static void guest_iommu_process_command(unsigned long _d)
                                             sizeof(cmd_entry_t), head);
         ASSERT(mfn_valid(cmd_mfn));
 
-        cmd_base = map_domain_page(cmd_mfn);
+        cmd_base = map_domain_page(_mfn(cmd_mfn));
         cmd = cmd_base + head % entries_per_page;
 
         opcode = get_field_from_reg_u32(cmd->data[1],
diff --git a/xen/drivers/passthrough/amd/iommu_map.c b/xen/drivers/passthrough/amd/iommu_map.c
index 64c5225..586c441 100644
--- a/xen/drivers/passthrough/amd/iommu_map.c
+++ b/xen/drivers/passthrough/amd/iommu_map.c
@@ -42,7 +42,7 @@ void clear_iommu_pte_present(unsigned long l1_mfn, unsigned long gfn)
 {
     u64 *table, *pte;
 
-    table = map_domain_page(l1_mfn);
+    table = map_domain_page(_mfn(l1_mfn));
     pte = table + pfn_to_pde_idx(gfn, IOMMU_PAGING_MODE_LEVEL_1);
     *pte = 0;
     unmap_domain_page(table);
@@ -115,7 +115,7 @@ static bool_t set_iommu_pte_present(unsigned long pt_mfn, unsigned long gfn,
     u32 *pde;
     bool_t need_flush = 0;
 
-    table = map_domain_page(pt_mfn);
+    table = map_domain_page(_mfn(pt_mfn));
 
     pde = (u32*)(table + pfn_to_pde_idx(gfn, pde_level));
 
@@ -349,12 +349,12 @@ static int iommu_update_pde_count(struct domain *d, unsigned long pt_mfn,
     next_level = merge_level - 1;
 
     /* get pde at merge level */
-    table = map_domain_page(pt_mfn);
+    table = map_domain_page(_mfn(pt_mfn));
     pde = table + pfn_to_pde_idx(gfn, merge_level);
 
     /* get page table of next level */
     ntable_maddr = amd_iommu_get_next_table_from_pte((u32*)pde);
-    ntable = map_domain_page(ntable_maddr >> PAGE_SHIFT);
+    ntable = map_domain_page(_mfn(paddr_to_pfn(ntable_maddr)));
 
     /* get the first mfn of next level */
     first_mfn = amd_iommu_get_next_table_from_pte((u32*)ntable) >> PAGE_SHIFT;
@@ -400,7 +400,7 @@ static int iommu_merge_pages(struct domain *d, unsigned long pt_mfn,
 
     ASSERT( spin_is_locked(&hd->arch.mapping_lock) && pt_mfn );
 
-    table = map_domain_page(pt_mfn);
+    table = map_domain_page(_mfn(pt_mfn));
     pde = table + pfn_to_pde_idx(gfn, merge_level);
 
     /* get first mfn */
@@ -412,7 +412,7 @@ static int iommu_merge_pages(struct domain *d, unsigned long pt_mfn,
         return 1;
     }
 
-    ntable = map_domain_page(ntable_mfn);
+    ntable = map_domain_page(_mfn(ntable_mfn));
     first_mfn = amd_iommu_get_next_table_from_pte((u32*)ntable) >> PAGE_SHIFT;
 
     if ( first_mfn == 0 )
@@ -467,7 +467,7 @@ static int iommu_pde_from_gfn(struct domain *d, unsigned long pfn,
         unsigned int next_level = level - 1;
         pt_mfn[level] = next_table_mfn;
 
-        next_table_vaddr = map_domain_page(next_table_mfn);
+        next_table_vaddr = map_domain_page(_mfn(next_table_mfn));
         pde = next_table_vaddr + pfn_to_pde_idx(pfn, level);
 
         /* Here might be a super page frame */
diff --git a/xen/drivers/passthrough/vtd/x86/vtd.c b/xen/drivers/passthrough/vtd/x86/vtd.c
index 109234e..8beec8c 100644
--- a/xen/drivers/passthrough/vtd/x86/vtd.c
+++ b/xen/drivers/passthrough/vtd/x86/vtd.c
@@ -41,7 +41,7 @@ boolean_param("iommu_inclusive_mapping", iommu_inclusive_mapping);
 
 void *map_vtd_domain_page(u64 maddr)
 {
-    return map_domain_page(maddr >> PAGE_SHIFT_4K);
+    return map_domain_page(_mfn(paddr_to_pfn(maddr)));
 }
 
 void unmap_vtd_domain_page(void *va)
diff --git a/xen/include/asm-x86/hap.h b/xen/include/asm-x86/hap.h
index 7876527..ca590f3 100644
--- a/xen/include/asm-x86/hap.h
+++ b/xen/include/asm-x86/hap.h
@@ -37,7 +37,7 @@
 static inline void *
 hap_map_domain_page(mfn_t mfn)
 {
-    return map_domain_page(mfn_x(mfn));
+    return map_domain_page(mfn);
 }
 
 static inline void
diff --git a/xen/include/asm-x86/page.h b/xen/include/asm-x86/page.h
index e26daaf..87b3341 100644
--- a/xen/include/asm-x86/page.h
+++ b/xen/include/asm-x86/page.h
@@ -172,9 +172,9 @@ static inline l4_pgentry_t l4e_from_paddr(paddr_t pa, unsigned int flags)
 #define l3e_to_l2e(x)              ((l2_pgentry_t *)__va(l3e_get_paddr(x)))
 #define l4e_to_l3e(x)              ((l3_pgentry_t *)__va(l4e_get_paddr(x)))
 
-#define map_l1t_from_l2e(x)        ((l1_pgentry_t *)map_domain_page(l2e_get_pfn(x)))
-#define map_l2t_from_l3e(x)        ((l2_pgentry_t *)map_domain_page(l3e_get_pfn(x)))
-#define map_l3t_from_l4e(x)        ((l3_pgentry_t *)map_domain_page(l4e_get_pfn(x)))
+#define map_l1t_from_l2e(x)        ((l1_pgentry_t *)map_domain_page(_mfn(l2e_get_pfn(x))))
+#define map_l2t_from_l3e(x)        ((l2_pgentry_t *)map_domain_page(_mfn(l3e_get_pfn(x))))
+#define map_l3t_from_l4e(x)        ((l3_pgentry_t *)map_domain_page(_mfn(l4e_get_pfn(x))))
 
 /* Given a virtual address, get an entry offset into a page table. */
 #define l1_table_offset(a)         \
@@ -234,6 +234,7 @@ void copy_page_sse2(void *, const void *);
 #define __pfn_to_paddr(pfn) ((paddr_t)(pfn) << PAGE_SHIFT)
 #define __paddr_to_pfn(pa)  ((unsigned long)((pa) >> PAGE_SHIFT))
 
+
 /* Convert between machine frame numbers and spage-info structures. */
 #define __mfn_to_spage(mfn)  (spage_table + pfn_to_sdx(mfn))
 #define __spage_to_mfn(pg)   sdx_to_pfn((unsigned long)((pg) - spage_table))
diff --git a/xen/include/asm-x86/paging.h b/xen/include/asm-x86/paging.h
index 9c32665..7a09881 100644
--- a/xen/include/asm-x86/paging.h
+++ b/xen/include/asm-x86/paging.h
@@ -376,7 +376,7 @@ guest_map_l1e(struct vcpu *v, unsigned long addr, unsigned long *gl1mfn)
          != _PAGE_PRESENT )
         return NULL;
     *gl1mfn = l2e_get_pfn(l2e);
-    return (l1_pgentry_t *)map_domain_page(*gl1mfn) + l1_table_offset(addr);
+    return (l1_pgentry_t *)map_domain_page(_mfn(*gl1mfn)) + l1_table_offset(addr);
 }
 
 /* Pull down the mapping we got from guest_map_l1e() */
diff --git a/xen/include/xen/domain_page.h b/xen/include/xen/domain_page.h
index 9bfeef0..c1d630c 100644
--- a/xen/include/xen/domain_page.h
+++ b/xen/include/xen/domain_page.h
@@ -23,7 +23,7 @@ void copy_domain_page(mfn_t dst, const mfn_t src);
  * Map a given page frame, returning the mapped virtual address. The page is
  * then accessible within the current VCPU until a corresponding unmap call.
  */
-void *map_domain_page(unsigned long mfn);
+void *map_domain_page(mfn_t mfn);
 
 /*
  * Pass a VA within a page previously mapped in the context of the
@@ -44,7 +44,7 @@ unsigned long domain_page_map_to_mfn(const void *va);
 void *map_domain_page_global(mfn_t mfn);
 void unmap_domain_page_global(const void *va);
 
-#define __map_domain_page(pg)        map_domain_page(__page_to_mfn(pg))
+#define __map_domain_page(pg)        map_domain_page(_mfn(__page_to_mfn(pg)))
 
 static inline void *__map_domain_page_global(const struct page_info *pg)
 {
@@ -84,7 +84,7 @@ map_domain_page_with_cache(unsigned long mfn, struct domain_mmap_cache *cache)
     }
 
     cache->mfn   = mfn;
-    cache->va    = map_domain_page(mfn);
+    cache->va    = map_domain_page(_mfn(mfn));
     cache->flags = DMCACHE_ENTRY_HELD | DMCACHE_ENTRY_VALID;
 
  done:
@@ -113,7 +113,7 @@ domain_mmap_cache_destroy(struct domain_mmap_cache *cache)
 
 #else /* !CONFIG_DOMAIN_PAGE */
 
-#define map_domain_page(mfn)                mfn_to_virt(mfn)
+#define map_domain_page(mfn)                mfn_to_virt(mfn_x(mfn))
 #define __map_domain_page(pg)               page_to_virt(pg)
 #define unmap_domain_page(va)               ((void)(va))
 #define domain_page_map_to_mfn(va)          virt_to_mfn((unsigned long)(va))
-- 
2.1.4
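
For readers following the conversion pattern rather than the individual hunks,
here is a minimal sketch (illustrative only, not part of the patch; the
demo_mappings() wrapper is hypothetical) of the three call-site shapes this
series produces, using the same helpers the hunks above rely on:

    #include <xen/mm.h>           /* mfn_t, _mfn(), paddr_to_pfn() */
    #include <xen/domain_page.h>  /* map_domain_page(mfn_t), unmap_domain_page() */

    /* Hypothetical wrapper, for illustration only. */
    static void demo_mappings(unsigned long raw_mfn, paddr_t maddr, mfn_t mfn)
    {
        /* A raw frame number must now be boxed explicitly. */
        void *a = map_domain_page(_mfn(raw_mfn));

        /* A physical address goes through paddr_to_pfn(), then is boxed,
         * replacing open-coded "maddr >> PAGE_SHIFT". */
        void *b = map_domain_page(_mfn(paddr_to_pfn(maddr)));

        /* A value that is already mfn_t passes straight through --
         * the old mfn_x() unboxing at such call sites is dropped. */
        void *c = map_domain_page(mfn);

        unmap_domain_page(c);
        unmap_domain_page(b);
        unmap_domain_page(a);
    }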


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
