[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen-unstable] x86: properly use map_domain_page() in miscellaneous places


  • To: xen-changelog@xxxxxxxxxxxxxxxxxxx
  • From: Xen patchbot-unstable <patchbot@xxxxxxx>
  • Date: Mon, 04 Feb 2013 15:44:13 +0000
  • Delivery-date: Mon, 04 Feb 2013 15:44:40 +0000
  • List-id: "Change log for Mercurial \(receive only\)" <xen-changelog.lists.xen.org>

# HG changeset patch
# User Jan Beulich <jbeulich@xxxxxxxx>
# Date 1358946821 -3600
# Node ID 2450f38b4ee08e4860b0de873a1e72587a9a5006
# Parent  0c845035d84c5231719dd2bc1ace2c0483f4a5b6
x86: properly use map_domain_page() in miscellaneous places

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
Acked-by: Keir Fraser <keir@xxxxxxx>
---


diff -r 0c845035d84c -r 2450f38b4ee0 xen/arch/x86/domctl.c
--- a/xen/arch/x86/domctl.c     Wed Jan 23 14:12:54 2013 +0100
+++ b/xen/arch/x86/domctl.c     Wed Jan 23 14:13:41 2013 +0100
@@ -150,7 +150,7 @@ long arch_do_domctl(
                 ret = -ENOMEM;
                 break;
             }
-            arr = page_to_virt(page);
+            arr = __map_domain_page(page);
 
             for ( n = ret = 0; n < num; )
             {
@@ -220,7 +220,9 @@ long arch_do_domctl(
                 n += k;
             }
 
-            free_domheap_page(virt_to_page(arr));
+            page = mfn_to_page(domain_page_map_to_mfn(arr));
+            unmap_domain_page(arr);
+            free_domheap_page(page);
 
             break;
         }
@@ -1347,8 +1349,11 @@ void arch_get_info_guest(struct vcpu *v,
         }
         else
         {
-            l4_pgentry_t *l4e = __va(pagetable_get_paddr(v->arch.guest_table));
+            const l4_pgentry_t *l4e =
+                map_domain_page(pagetable_get_pfn(v->arch.guest_table));
+
             c.cmp->ctrlreg[3] = compat_pfn_to_cr3(l4e_get_pfn(*l4e));
+            unmap_domain_page(l4e);
 
             /* Merge shadow DR7 bits into real DR7. */
             c.cmp->debugreg[7] |= c.cmp->debugreg[5];
diff -r 0c845035d84c -r 2450f38b4ee0 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Wed Jan 23 14:12:54 2013 +0100
+++ b/xen/arch/x86/mm.c Wed Jan 23 14:13:41 2013 +0100
@@ -2538,14 +2538,18 @@ int new_guest_cr3(unsigned long mfn)
 
     if ( is_pv_32on64_domain(d) )
     {
+        unsigned long gt_mfn = pagetable_get_pfn(curr->arch.guest_table);
+        l4_pgentry_t *pl4e = map_domain_page(gt_mfn);
+
         okay = paging_mode_refcounts(d)
             ? 0 /* Old code was broken, but what should it be? */
             : mod_l4_entry(
-                    __va(pagetable_get_paddr(curr->arch.guest_table)),
+                    pl4e,
                     l4e_from_pfn(
                         mfn,
                         (_PAGE_PRESENT|_PAGE_RW|_PAGE_USER|_PAGE_ACCESSED)),
-                    pagetable_get_pfn(curr->arch.guest_table), 0, 0, curr) == 0;
+                    gt_mfn, 0, 0, curr) == 0;
+        unmap_domain_page(pl4e);
         if ( unlikely(!okay) )
         {
             MEM_LOG("Error while installing new compat baseptr %lx", mfn);
diff -r 0c845035d84c -r 2450f38b4ee0 xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c   Wed Jan 23 14:12:54 2013 +0100
+++ b/xen/arch/x86/mm/shadow/common.c   Wed Jan 23 14:13:41 2013 +0100
@@ -3543,6 +3543,9 @@ int shadow_track_dirty_vram(struct domai
     }
     else
     {
+        unsigned long map_mfn = INVALID_MFN;
+        void *map_sl1p = NULL;
+
         /* Iterate over VRAM to track dirty bits. */
         for ( i = 0; i < nr; i++ ) {
             mfn_t mfn = get_gfn_query_unlocked(d, begin_pfn + i, &t);
@@ -3576,7 +3579,17 @@ int shadow_track_dirty_vram(struct domai
                     {
                         /* Hopefully the most common case: only one mapping,
                          * whose dirty bit we can use. */
-                        l1_pgentry_t *sl1e = maddr_to_virt(sl1ma);
+                        l1_pgentry_t *sl1e;
+                        unsigned long sl1mfn = paddr_to_pfn(sl1ma);
+
+                        if ( sl1mfn != map_mfn )
+                        {
+                            if ( map_sl1p )
+                                sh_unmap_domain_page(map_sl1p);
+                            map_sl1p = sh_map_domain_page(_mfn(sl1mfn));
+                            map_mfn = sl1mfn;
+                        }
+                        sl1e = map_sl1p + (sl1ma & ~PAGE_MASK);
 
                         if ( l1e_get_flags(*sl1e) & _PAGE_DIRTY )
                         {
@@ -3603,6 +3616,9 @@ int shadow_track_dirty_vram(struct domai
             }
         }
 
+        if ( map_sl1p )
+            sh_unmap_domain_page(map_sl1p);
+
         rc = -EFAULT;
        if ( copy_to_guest(dirty_bitmap, dirty_vram->dirty_bitmap, dirty_size) == 0 ) {
             memset(dirty_vram->dirty_bitmap, 0, dirty_size);
diff -r 0c845035d84c -r 2450f38b4ee0 xen/arch/x86/traps.c
--- a/xen/arch/x86/traps.c      Wed Jan 23 14:12:54 2013 +0100
+++ b/xen/arch/x86/traps.c      Wed Jan 23 14:13:41 2013 +0100
@@ -2255,7 +2255,11 @@ static int emulate_privileged_op(struct 
             }
             else
             {
-                mfn = l4e_get_pfn(*(l4_pgentry_t *)__va(pagetable_get_paddr(v->arch.guest_table)));
+                l4_pgentry_t *pl4e =
+                    map_domain_page(pagetable_get_pfn(v->arch.guest_table));
+
+                mfn = l4e_get_pfn(*pl4e);
+                unmap_domain_page(pl4e);
                 *reg = compat_pfn_to_cr3(mfn_to_gmfn(
                     v->domain, mfn));
             }

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.