[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen-unstable] x86/mm: Fix deadlock between p2m and event channel locks.



# HG changeset patch
# User Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
# Date 1331737660 14400
# Node ID 4dbbc69ebece6376775e3ab9db4aa3e69c0f6fcf
# Parent  c25ec9e9a8126d562d2a72620aea8ee6b104f89e
x86/mm: Fix deadlock between p2m and event channel locks.

The hvm io emulation code holds the p2m lock for the duration of the
emulation, which may include sending an event to qemu. On a separate path,
map_domain_pirq grabs the event channel and p2m locks in opposite order.

Fix this by ensuring liveness of the ram_gfn used by io emulation, with a
page ref.

Reported-by: "Hao, Xudong" <xudong.hao@xxxxxxxxx>
Signed-off-by: "Hao, Xudong" <xudong.hao@xxxxxxxxx>
Signed-off-by: Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
Acked-by: Tim Deegan <tim@xxxxxxx>
Committed-by: Tim Deegan <tim@xxxxxxx>
---


diff -r c25ec9e9a812 -r 4dbbc69ebece xen/arch/x86/hvm/emulate.c
--- a/xen/arch/x86/hvm/emulate.c        Wed Mar 14 14:20:58 2012 +0000
+++ b/xen/arch/x86/hvm/emulate.c        Wed Mar 14 11:07:40 2012 -0400
@@ -77,6 +77,17 @@
         return X86EMUL_RETRY;
     }
 
+    /* Maintain a ref on the mfn to ensure liveness. Put the gfn
+     * to avoid potential deadlock wrt event channel lock, later. */
+    if ( mfn_valid(mfn_x(ram_mfn)) )
+        if ( !get_page(mfn_to_page(mfn_x(ram_mfn)),
+             curr->domain) )
+        {
+            put_gfn(curr->domain, ram_gfn);
+            return X86EMUL_RETRY;
+        }
+    put_gfn(curr->domain, ram_gfn);
+
     /*
      * Weird-sized accesses have undefined behaviour: we discard writes
      * and read all-ones.
@@ -87,7 +98,8 @@
         ASSERT(p_data != NULL); /* cannot happen with a REP prefix */
         if ( dir == IOREQ_READ )
             memset(p_data, ~0, size);
-        put_gfn(curr->domain, ram_gfn); 
+        if ( mfn_valid(mfn_x(ram_mfn)) )
+            put_page(mfn_to_page(mfn_x(ram_mfn)));
         return X86EMUL_UNHANDLEABLE;
     }
 
@@ -108,7 +120,8 @@
             unsigned int bytes = vio->mmio_large_write_bytes;
             if ( (addr >= pa) && ((addr + size) <= (pa + bytes)) )
             {
-                put_gfn(curr->domain, ram_gfn); 
+                if ( mfn_valid(mfn_x(ram_mfn)) )
+                    put_page(mfn_to_page(mfn_x(ram_mfn)));
                 return X86EMUL_OKAY;
             }
         }
@@ -120,7 +133,8 @@
             {
                 memcpy(p_data, &vio->mmio_large_read[addr - pa],
                        size);
-                put_gfn(curr->domain, ram_gfn); 
+                if ( mfn_valid(mfn_x(ram_mfn)) )
+                    put_page(mfn_to_page(mfn_x(ram_mfn)));
                 return X86EMUL_OKAY;
             }
         }
@@ -134,7 +148,8 @@
         vio->io_state = HVMIO_none;
         if ( p_data == NULL )
         {
-            put_gfn(curr->domain, ram_gfn);
+            if ( mfn_valid(mfn_x(ram_mfn)) )
+                put_page(mfn_to_page(mfn_x(ram_mfn)));
             return X86EMUL_UNHANDLEABLE;
         }
         goto finish_access;
@@ -144,11 +159,13 @@
              (addr == (vio->mmio_large_write_pa +
                        vio->mmio_large_write_bytes)) )
         {
-            put_gfn(curr->domain, ram_gfn);
+            if ( mfn_valid(mfn_x(ram_mfn)) )
+                put_page(mfn_to_page(mfn_x(ram_mfn)));
             return X86EMUL_RETRY;
         }
     default:
-        put_gfn(curr->domain, ram_gfn);
+        if ( mfn_valid(mfn_x(ram_mfn)) )
+            put_page(mfn_to_page(mfn_x(ram_mfn)));
         return X86EMUL_UNHANDLEABLE;
     }
 
@@ -156,7 +173,8 @@
     {
         gdprintk(XENLOG_WARNING, "WARNING: io already pending (%d)?\n",
                  p->state);
-        put_gfn(curr->domain, ram_gfn); 
+        if ( mfn_valid(mfn_x(ram_mfn)) )
+            put_page(mfn_to_page(mfn_x(ram_mfn)));
         return X86EMUL_UNHANDLEABLE;
     }
 
@@ -208,7 +226,8 @@
 
     if ( rc != X86EMUL_OKAY )
     {
-        put_gfn(curr->domain, ram_gfn); 
+        if ( mfn_valid(mfn_x(ram_mfn)) )
+            put_page(mfn_to_page(mfn_x(ram_mfn)));
         return rc;
     }
 
@@ -244,7 +263,8 @@
         }
     }
 
-    put_gfn(curr->domain, ram_gfn); 
+    if ( mfn_valid(mfn_x(ram_mfn)) )
+        put_page(mfn_to_page(mfn_x(ram_mfn)));
     return X86EMUL_OKAY;
 }
 

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, which monitors our
servers 24x7x365, backed by RackSpace's Fanatical Support®.