
[Xen-devel] [PATCH 10 of 13] x86/mm/p2m: Fix locking discipline around p2m lookups



# HG changeset patch
# User Tim Deegan <Tim.Deegan@xxxxxxxxxx>
# Date 1305302439 -3600
# Node ID ba2902017f010e1a3a756f4053175d71027244e8
# Parent  f2b42201dc1a2fea7f0dbf36c511b780caf55c85
x86/mm/p2m: Fix locking discipline around p2m lookups.

All gfn_to_mfn* functions except _query() might take the p2m lock,
so they can't be called with a p2m, shadow, hap or log_dirty lock held.
The remaining offender is the memory sharing code, which calls
_unshare() from inside the pagetable walker!  Fixing that is too big
for a cleanup patch like this one.

Signed-off-by: Tim Deegan <Tim.Deegan@xxxxxxxxxx>
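
[Editor's illustration, not part of the patch.] To make the locking rule
above concrete, here is a minimal standalone C model; lookup_alloc() and
lookup_query() are hypothetical stand-ins for gfn_to_mfn() and
gfn_to_mfn_query(), and a plain pthread mutex stands in for the p2m lock.
Using an error-checking mutex turns the self-deadlock of an allocating
lookup made under the p2m lock into a visible EDEADLK:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t p2m_lock;   /* stands in for Xen's p2m lock */

/* Models gfn_to_mfn(): may need to allocate, so it takes the lock. */
static int lookup_alloc(unsigned long gfn)
{
    int rc = pthread_mutex_lock(&p2m_lock);
    if (rc == EDEADLK) {
        printf("gfn %#lx: allocating lookup under the p2m lock would deadlock\n",
               gfn);
        return -1;
    }
    /* ... a real implementation would fill in missing entries here ... */
    pthread_mutex_unlock(&p2m_lock);
    return 0;
}

/* Models gfn_to_mfn_query(): read-only, never takes the lock. */
static int lookup_query(unsigned long gfn)
{
    (void)gfn;   /* ... would only read the existing entry ... */
    return 0;
}

int main(void)
{
    pthread_mutexattr_t attr;
    pthread_mutexattr_init(&attr);
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
    pthread_mutex_init(&p2m_lock, &attr);

    pthread_mutex_lock(&p2m_lock);   /* caller already holds the "p2m lock" */
    lookup_query(0x1000);            /* fine: _query never locks */
    lookup_alloc(0x1000);            /* reports the would-be deadlock */
    pthread_mutex_unlock(&p2m_lock);
    return 0;
}

Compile with cc -pthread.  The same reasoning is why only the _query
variant is safe below the shadow, hap or log_dirty locks in the hunks
that follow.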

diff -r f2b42201dc1a -r ba2902017f01 xen/arch/x86/hvm/mtrr.c
--- a/xen/arch/x86/hvm/mtrr.c   Fri May 13 17:00:39 2011 +0100
+++ b/xen/arch/x86/hvm/mtrr.c   Fri May 13 17:00:39 2011 +0100
@@ -392,7 +392,7 @@ uint32_t get_pat_flags(struct vcpu *v,
     {
         struct domain *d = v->domain;
         p2m_type_t p2mt;
-        gfn_to_mfn(d, paddr_to_pfn(gpaddr), &p2mt);
+        gfn_to_mfn_query(d, paddr_to_pfn(gpaddr), &p2mt);
         if (p2m_is_ram(p2mt))
             gdprintk(XENLOG_WARNING,
                     "Conflict occurs for a given guest l1e flags:%x "
diff -r f2b42201dc1a -r ba2902017f01 xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Fri May 13 17:00:39 2011 +0100
+++ b/xen/arch/x86/mm/p2m.c     Fri May 13 17:00:39 2011 +0100
@@ -574,7 +574,7 @@ set_mmio_p2m_entry(struct domain *d, uns
     if ( 0 == rc )
         gdprintk(XENLOG_ERR,
             "set_mmio_p2m_entry: set_p2m_entry failed! mfn=%08lx\n",
-            mfn_x(gfn_to_mfn(d, gfn, &ot)));
+            mfn_x(gfn_to_mfn_query(d, gfn, &ot)));
     return rc;
 }
 
@@ -631,8 +631,8 @@ set_shared_p2m_entry(struct domain *d, u
     p2m_unlock(p2m);
     if ( 0 == rc )
         gdprintk(XENLOG_ERR,
-            "set_mmio_p2m_entry: set_p2m_entry failed! mfn=%08lx\n",
-            gmfn_to_mfn(p2m->domain, gfn));
+            "set_shared_p2m_entry: set_p2m_entry failed! mfn=%08lx\n",
+            mfn_x(gfn_to_mfn_query(d, gfn, &ot)));
     return rc;
 }
 
diff -r f2b42201dc1a -r ba2902017f01 xen/arch/x86/mm/shadow/common.c
--- a/xen/arch/x86/mm/shadow/common.c   Fri May 13 17:00:39 2011 +0100
+++ b/xen/arch/x86/mm/shadow/common.c   Fri May 13 17:00:39 2011 +0100
@@ -3712,7 +3712,7 @@ int shadow_track_dirty_vram(struct domai
 
         /* Iterate over VRAM to track dirty bits. */
         for ( i = 0; i < nr; i++ ) {
-            mfn_t mfn = gfn_to_mfn(d, begin_pfn + i, &t);
+            mfn_t mfn = gfn_to_mfn_query(d, begin_pfn + i, &t);
             struct page_info *page;
             int dirty = 0;
             paddr_t sl1ma = dirty_vram->sl1ma[i];
@@ -3797,7 +3797,7 @@ int shadow_track_dirty_vram(struct domai
                 /* was clean for more than two seconds, try to disable guest
                  * write access */
                 for ( i = begin_pfn; i < end_pfn; i++ ) {
-                    mfn_t mfn = gfn_to_mfn(d, i, &t);
+                    mfn_t mfn = gfn_to_mfn_query(d, i, &t);
                     if (mfn_x(mfn) != INVALID_MFN)
                         flush_tlb |= sh_remove_write_access(d->vcpu[0], mfn, 1, 0);
                 }
diff -r f2b42201dc1a -r ba2902017f01 xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h Fri May 13 17:00:39 2011 +0100
+++ b/xen/include/asm-x86/p2m.h Fri May 13 17:00:39 2011 +0100
@@ -411,6 +411,10 @@ static inline mfn_t gfn_to_mfn_type(stru
     return gfn_to_mfn_type_p2m(p2m, gfn, t, &a, q);
 }
 
+/* Syntactic sugar: most callers will use one of these. 
+ * N.B. gfn_to_mfn_query() is the _only_ one guaranteed not to take the
+ * p2m lock; none of the others can be called with the p2m, hap or
+ * shadow lock held. */
 #define gfn_to_mfn(d, g, t)         gfn_to_mfn_type((d), (g), (t), p2m_alloc)
 #define gfn_to_mfn_query(d, g, t)   gfn_to_mfn_type((d), (g), (t), p2m_query)
 #define gfn_to_mfn_guest(d, g, t)   gfn_to_mfn_type((d), (g), (t), p2m_guest)
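
[Editor's illustration, not part of the patch.] As a caller-side sketch of
the comment added above (stand-in types and no-op locks, hypothetical names
suffixed _stub; none of this is code from the patch), the pattern the
shadow_track_dirty_vram() hunks follow is: do any allocating lookup before
taking an mm lock, and use only the _query variant inside the locked region:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t mfn_t;
typedef int p2m_type_t;

/* Hypothetical stand-ins for the real macros and locks, for shape only. */
static mfn_t gfn_to_mfn_stub(void *d, unsigned long g, p2m_type_t *t)
{ (void)d; (void)g; *t = 1; return 0x1234; }  /* models gfn_to_mfn(): may lock */
static mfn_t gfn_to_mfn_query_stub(void *d, unsigned long g, p2m_type_t *t)
{ (void)d; (void)g; *t = 1; return 0x1234; }  /* models _query: never locks */
static void shadow_lock(void *d)   { (void)d; }
static void shadow_unlock(void *d) { (void)d; }

int main(void)
{
    void *d = NULL;   /* stands in for struct domain * */
    p2m_type_t t;

    /* Any lookup that may allocate happens before the mm lock is taken. */
    mfn_t pre = gfn_to_mfn_stub(d, 0x1000, &t);

    shadow_lock(d);
    /* Inside the locked region only the _query variant is safe. */
    mfn_t mfn = gfn_to_mfn_query_stub(d, 0x1000, &t);
    shadow_unlock(d);

    printf("before lock: %#llx, under lock: %#llx\n",
           (unsigned long long)pre, (unsigned long long)mfn);
    return 0;
}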
