
[Xen-devel] [PATCH 09/17] xen/x86: Reduce the number of uses of l*e_{from, get}_pfn()



From: Julien Grall <jgrall@xxxxxxxxxx>

It is preferable to use the typesafe helpers l*e_{from, get}_mfn(). Sadly,
they can't be used everywhere easily, so for now only the simple cases are
converted.

No functional changes intended.

Signed-off-by: Julien Grall <jgrall@xxxxxxxxxx>
---
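For anyone unfamiliar with the typesafe wrappers: mfn_t comes from the
TYPE_SAFE() invocation in xen/include/xen/mm.h, with helpers such as
mfn_add() and mfn_eq() defined alongside it. Below is a minimal sketch of
the pattern, simplified and illustrative rather than the exact Xen
definitions (in particular, non-debug Xen builds compile the wrapper down
to a plain integer):

    #include <stdbool.h>

    /* Wrapping the frame number in a struct turns accidental mixing of
     * raw unsigned longs and MFNs into a compile-time error. */
    typedef struct { unsigned long mfn; } mfn_t;

    static inline mfn_t _mfn(unsigned long m) { return (mfn_t){ m }; }
    static inline unsigned long mfn_x(mfn_t m) { return m.mfn; }

    /* The helpers this patch relies on. */
    static inline mfn_t mfn_add(mfn_t m, unsigned long i)
    {
        return _mfn(mfn_x(m) + i);
    }

    static inline bool mfn_eq(mfn_t a, mfn_t b)
    {
        return mfn_x(a) == mfn_x(b);
    }

With these, e.g. l2e_from_mfn(mfn_add(l3e_get_mfn(e), i << PAGETABLE_ORDER),
flags) expresses the same computation as the old arithmetic on
l3e_get_pfn(e), but without ever exposing a bare frame number.
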
 xen/arch/x86/machine_kexec.c |  2 +-
 xen/arch/x86/mm.c            | 30 +++++++++++++++---------------
 xen/arch/x86/setup.c         |  2 +-
 xen/include/asm-x86/page.h   |  2 +-
 4 files changed, 18 insertions(+), 18 deletions(-)

diff --git a/xen/arch/x86/machine_kexec.c b/xen/arch/x86/machine_kexec.c
index b70d5a6a86..b69c2e5fad 100644
--- a/xen/arch/x86/machine_kexec.c
+++ b/xen/arch/x86/machine_kexec.c
@@ -86,7 +86,7 @@ int machine_kexec_add_page(struct kexec_image *image, unsigned long vaddr,
 
     l1 = __map_domain_page(l1_page);
     l1 += l1_table_offset(vaddr);
-    l1e_write(l1, l1e_from_pfn(maddr >> PAGE_SHIFT, __PAGE_HYPERVISOR));
+    l1e_write(l1, l1e_from_mfn(maddr_to_mfn(maddr), __PAGE_HYPERVISOR));
 
     ret = 0;
 out:
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 65bc03984d..2516548e49 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -1138,7 +1138,7 @@ static int
 get_page_from_l2e(
     l2_pgentry_t l2e, mfn_t l2mfn, struct domain *d, unsigned int flags)
 {
-    unsigned long mfn = l2e_get_pfn(l2e);
+    mfn_t mfn = l2e_get_mfn(l2e);
     int rc;
 
     if ( unlikely((l2e_get_flags(l2e) & L2_DISALLOW_MASK)) )
@@ -1150,7 +1150,7 @@ get_page_from_l2e(
 
     ASSERT(!(flags & PTF_preemptible));
 
-    rc = get_page_and_type_from_mfn(_mfn(mfn), PGT_l1_page_table, d, flags);
+    rc = get_page_and_type_from_mfn(mfn, PGT_l1_page_table, d, flags);
     if ( unlikely(rc == -EINVAL) && get_l2_linear_pagetable(l2e, l2mfn, d) )
         rc = 0;
 
@@ -1209,14 +1209,14 @@ static int _put_page_type(struct page_info *page, unsigned int flags,
 
 void put_page_from_l1e(l1_pgentry_t l1e, struct domain *l1e_owner)
 {
-    unsigned long     pfn = l1e_get_pfn(l1e);
+    mfn_t mfn = l1e_get_mfn(l1e);
     struct page_info *page;
     struct domain    *pg_owner;
 
-    if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) || is_iomem_page(_mfn(pfn)) )
+    if ( !(l1e_get_flags(l1e) & _PAGE_PRESENT) || is_iomem_page(mfn) )
         return;
 
-    page = mfn_to_page(_mfn(pfn));
+    page = mfn_to_page(mfn);
     pg_owner = page_get_owner(page);
 
     /*
@@ -5219,8 +5219,8 @@ int map_pages_to_xen(
 
             for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
                 l2e_write(l2t + i,
-                          l2e_from_pfn(l3e_get_pfn(ol3e) +
-                                       (i << PAGETABLE_ORDER),
+                          l2e_from_mfn(mfn_add(l3e_get_mfn(ol3e),
+                                               (i << PAGETABLE_ORDER)),
                                        l3e_get_flags(ol3e)));
 
             if ( l3e_get_flags(ol3e) & _PAGE_GLOBAL )
@@ -5320,7 +5320,7 @@ int map_pages_to_xen(
 
                 for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
                     l1e_write(&l1t[i],
-                              l1e_from_pfn(l2e_get_pfn(*pl2e) + i,
+                              l1e_from_mfn(mfn_add(l2e_get_mfn(*pl2e), i),
                                            lNf_to_l1f(l2e_get_flags(*pl2e))));
 
                 if ( l2e_get_flags(*pl2e) & _PAGE_GLOBAL )
@@ -5391,7 +5391,7 @@ int map_pages_to_xen(
                 l1t = l2e_to_l1e(ol2e);
                 base_mfn = l1e_get_pfn(l1t[0]) & ~(L1_PAGETABLE_ENTRIES - 1);
                 for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
-                    if ( (l1e_get_pfn(l1t[i]) != (base_mfn + i)) ||
+                    if ( !mfn_eq(l1e_get_mfn(l1t[i]), _mfn(base_mfn + i)) ||
                          (l1e_get_flags(l1t[i]) != flags) )
                         break;
                 if ( i == L1_PAGETABLE_ENTRIES )
@@ -5521,7 +5521,7 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
             {
                 /* PAGE1GB: whole superpage is modified. */
                 l3_pgentry_t nl3e = !(nf & _PAGE_PRESENT) ? l3e_empty()
-                    : l3e_from_pfn(l3e_get_pfn(*pl3e),
+                    : l3e_from_mfn(l3e_get_mfn(*pl3e),
                                    (l3e_get_flags(*pl3e) & ~FLAGS_MASK) | nf);
 
                 l3e_write_atomic(pl3e, nl3e);
@@ -5535,8 +5535,8 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
                 return -ENOMEM;
             for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
                 l2e_write(l2t + i,
-                          l2e_from_pfn(l3e_get_pfn(*pl3e) +
-                                       (i << PAGETABLE_ORDER),
+                          l2e_from_mfn(mfn_add(l3e_get_mfn(*pl3e),
+                                               (i << PAGETABLE_ORDER)),
                                        l3e_get_flags(*pl3e)));
             if ( locking )
                 spin_lock(&map_pgdir_lock);
@@ -5576,7 +5576,7 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
             {
                 /* PSE: whole superpage is modified. */
                 l2_pgentry_t nl2e = !(nf & _PAGE_PRESENT) ? l2e_empty()
-                    : l2e_from_pfn(l2e_get_pfn(*pl2e),
+                    : l2e_from_mfn(l2e_get_mfn(*pl2e),
                                    (l2e_get_flags(*pl2e) & ~FLAGS_MASK) | nf);
 
                 l2e_write_atomic(pl2e, nl2e);
@@ -5592,7 +5592,7 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
                     return -ENOMEM;
                 for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
                     l1e_write(&l1t[i],
-                              l1e_from_pfn(l2e_get_pfn(*pl2e) + i,
+                              l1e_from_mfn(mfn_add(l2e_get_mfn(*pl2e), i),
                                            l2e_get_flags(*pl2e) & ~_PAGE_PSE));
                 if ( locking )
                     spin_lock(&map_pgdir_lock);
@@ -5625,7 +5625,7 @@ int modify_xen_mappings(unsigned long s, unsigned long e, unsigned int nf)
                 ASSERT(!(nf & _PAGE_PRESENT));
 
             nl1e = !(nf & _PAGE_PRESENT) ? l1e_empty()
-                : l1e_from_pfn(l1e_get_pfn(*pl1e),
+                : l1e_from_mfn(l1e_get_mfn(*pl1e),
                                (l1e_get_flags(*pl1e) & ~FLAGS_MASK) | nf);
 
             l1e_write_atomic(pl1e, nl1e);
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index cfe95c5dac..4d1d38dae3 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -1147,7 +1147,7 @@ void __init noreturn __start_xen(unsigned long mbi_p)
             BUG_ON(using_2M_mapping() &&
                    l2_table_offset((unsigned long)_erodata) ==
                    l2_table_offset((unsigned long)_stext));
-            *pl2e++ = l2e_from_pfn(xen_phys_start >> PAGE_SHIFT,
+            *pl2e++ = l2e_from_mfn(maddr_to_mfn(xen_phys_start),
                                    PAGE_HYPERVISOR_RX | _PAGE_PSE);
             for ( i = 1; i < L2_PAGETABLE_ENTRIES; i++, pl2e++ )
             {
diff --git a/xen/include/asm-x86/page.h b/xen/include/asm-x86/page.h
index 377ba14f6e..8d581cd1e7 100644
--- a/xen/include/asm-x86/page.h
+++ b/xen/include/asm-x86/page.h
@@ -270,7 +270,7 @@ void copy_page_sse2(void *, const void *);
 #define pfn_to_paddr(pfn)   __pfn_to_paddr(pfn)
 #define paddr_to_pfn(pa)    __paddr_to_pfn(pa)
 #define paddr_to_pdx(pa)    pfn_to_pdx(paddr_to_pfn(pa))
-#define vmap_to_mfn(va)     _mfn(l1e_get_pfn(*virt_to_xen_l1e((unsigned long)(va))))
+#define vmap_to_mfn(va)     l1e_get_mfn(*virt_to_xen_l1e((unsigned long)(va)))
 #define vmap_to_page(va)    mfn_to_page(vmap_to_mfn(va))
 
 #endif /* !defined(__ASSEMBLY__) */
-- 
2.17.1

