
[Xen-devel] [PATCH 7 of 8] x86/mm: clean use of p2m unlocked queries


  • To: xen-devel@xxxxxxxxxxxxxxxxxxx
  • From: Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
  • Date: Wed, 25 Jan 2012 22:53:31 -0500
  • Cc: olaf@xxxxxxxxx, tim@xxxxxxx, andres@xxxxxxxxxxxxxx, adin@xxxxxxxxxxxxxx
  • Delivery-date: Thu, 26 Jan 2012 03:48:25 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xensource.com>

 xen/arch/x86/hvm/emulate.c    |  35 ++++++++++++++++++++++++++++-------
 xen/arch/x86/hvm/hvm.c        |   5 ++++-
 xen/arch/x86/hvm/stdvga.c     |  12 ++++++++++--
 xen/arch/x86/hvm/vmx/vmx.c    |   2 +-
 xen/arch/x86/mm/mem_sharing.c |   2 +-
 5 files changed, 44 insertions(+), 12 deletions(-)


Limit unlocked queries to p2m_query lookups only. This matches the
name and intended semantics: perform only a lookup, and do so
explicitly without taking the p2m lock. Call sites that need the
entry to remain valid while they use it are converted to locked
get_gfn/put_gfn pairs.

Signed-off-by: Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
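
For readers new to this accessor family, the discipline the series
enforces can be sketched as follows. This is a simplified
illustration, assuming the p2m accessors as used in the hunks below
(get_gfn, put_gfn, get_gfn_query_unlocked); the wrapper function and
comments are mine, not the real p2m.h definitions.

    #include <xen/sched.h>   /* struct domain (Xen-internal header) */
    #include <asm/p2m.h>     /* p2m_type_t, get_gfn()/put_gfn() */

    /* Hypothetical sketch of the two lookup disciplines this patch
     * distinguishes. */
    static void p2m_lookup_sketch(struct domain *d, unsigned long gfn)
    {
        p2m_type_t t;
        mfn_t mfn;

        /* Locked lookup: holds the p2m entry until put_gfn(), and may
         * have side effects (populating a paged-out page, unsharing).
         * Every get_gfn() must be balanced by a put_gfn() on every
         * exit path. */
        mfn = get_gfn(d, gfn, &t);
        /* ... use mfn and t while the entry is held ... */
        put_gfn(d, gfn);

        /* Unlocked query: a pure, side-effect-free snapshot of the
         * current p2m entry.  Nothing is held on return, so the result
         * may be stale; fit for diagnostics and type reporting only. */
        mfn = get_gfn_query_unlocked(d, gfn, &t);
        (void)mfn;
    }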

diff -r d4336e35f0bc -r 2dc0ef05662e xen/arch/x86/hvm/emulate.c
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -660,7 +660,7 @@ static int hvmemul_rep_movs(
 {
     struct hvm_emulate_ctxt *hvmemul_ctxt =
         container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
-    unsigned long saddr, daddr, bytes;
+    unsigned long saddr, daddr, bytes, sgfn, dgfn;
     paddr_t sgpa, dgpa;
     uint32_t pfec = PFEC_page_present;
     p2m_type_t p2mt;
@@ -693,17 +693,28 @@ static int hvmemul_rep_movs(
     if ( rc != X86EMUL_OKAY )
         return rc;
 
-    /* Unlocked works here because we get_gfn for real in whatever
-     * we call later. */
-    (void)get_gfn_unlocked(current->domain, sgpa >> PAGE_SHIFT, &p2mt);
+    /* XXX In a fine-grained p2m locking scenario, we would need to order
+     * these get_gfn calls, or else we might deadlock */
+    sgfn = sgpa >> PAGE_SHIFT;
+    (void)get_gfn(current->domain, sgfn, &p2mt);
     if ( !p2m_is_ram(p2mt) && !p2m_is_grant(p2mt) )
-        return hvmemul_do_mmio(
+    {
+        rc = hvmemul_do_mmio(
             sgpa, reps, bytes_per_rep, dgpa, IOREQ_READ, df, NULL);
+        put_gfn(current->domain, sgfn);
+        return rc;
+    }
 
-    (void)get_gfn_unlocked(current->domain, dgpa >> PAGE_SHIFT, &p2mt);
+    dgfn = dgpa >> PAGE_SHIFT;
+    (void)get_gfn(current->domain, dgfn, &p2mt);
     if ( !p2m_is_ram(p2mt) && !p2m_is_grant(p2mt) )
-        return hvmemul_do_mmio(
+    {
+        rc = hvmemul_do_mmio(
             dgpa, reps, bytes_per_rep, sgpa, IOREQ_WRITE, df, NULL);
+        put_gfn(current->domain, sgfn);
+        put_gfn(current->domain, dgfn);
+        return rc;
+    }
 
     /* RAM-to-RAM copy: emulate as equivalent of memmove(dgpa, sgpa, bytes). */
     bytes = *reps * bytes_per_rep;
@@ -718,7 +729,11 @@ static int hvmemul_rep_movs(
      * can be emulated by a source-to-buffer-to-destination block copy.
      */
     if ( ((dgpa + bytes_per_rep) > sgpa) && (dgpa < (sgpa + bytes)) )
+    {
+        put_gfn(current->domain, sgfn);
+        put_gfn(current->domain, dgfn);
         return X86EMUL_UNHANDLEABLE;
+    }
 
     /* Adjust destination address for reverse copy. */
     if ( df )
@@ -727,7 +742,11 @@ static int hvmemul_rep_movs(
     /* Allocate temporary buffer. Fall back to slow emulation if this fails. */
     buf = xmalloc_bytes(bytes);
     if ( buf == NULL )
+    {
+        put_gfn(current->domain, sgfn);
+        put_gfn(current->domain, dgfn);
         return X86EMUL_UNHANDLEABLE;
+    }
 
     /*
      * We do a modicum of checking here, just for paranoia's sake and to
@@ -738,6 +757,8 @@ static int hvmemul_rep_movs(
         rc = hvm_copy_to_guest_phys(dgpa, buf, bytes);
 
     xfree(buf);
+    put_gfn(current->domain, sgfn);
+    put_gfn(current->domain, dgfn);
 
     if ( rc == HVMCOPY_gfn_paged_out )
         return X86EMUL_RETRY;
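
The XXX comment in the first hunk above flags the known gap: under
fine-grained p2m locking, two vcpus doing rep movs in opposite
directions could each hold one of the two entries and wait on the
other. The conventional fix is to take the pair in a global order,
e.g. ascending gfn. A sketch of that ordering, assuming the same
get_gfn() interface; the helper itself is hypothetical, not existing
Xen code:

    /* Hypothetical helper: acquire two p2m entries in ascending gfn
     * order, the classic two-lock ordering that rules out AB/BA
     * deadlock.  When sgfn == dgfn this nests get_gfn() on a single
     * entry, which the patched code above already relies on being
     * legal (it takes and puts both gfns unconditionally). */
    static void get_two_gfns_ordered(struct domain *d,
                                     unsigned long sgfn, p2m_type_t *st,
                                     unsigned long dgfn, p2m_type_t *dt)
    {
        if ( sgfn <= dgfn )
        {
            (void)get_gfn(d, sgfn, st);
            (void)get_gfn(d, dgfn, dt);
        }
        else
        {
            (void)get_gfn(d, dgfn, dt);
            (void)get_gfn(d, sgfn, st);
        }
        /* The caller must still put_gfn() both gfns on every exit. */
    }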
diff -r d4336e35f0bc -r 2dc0ef05662e xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3975,7 +3975,10 @@ long do_hvm_op(unsigned long op, XEN_GUE
         rc = -EINVAL;
         if ( is_hvm_domain(d) )
         {
-            get_gfn_unshare_unlocked(d, a.pfn, &t);
+            /* Use a get_gfn query, as we are only interested in the
+             * current type, not in allocating or unsharing. That will
+             * happen on access, if needed. */
+            get_gfn_query_unlocked(d, a.pfn, &t);
             if ( p2m_is_mmio(t) )
                 a.mem_type =  HVMMEM_mmio_dm;
             else if ( p2m_is_readonly(t) )
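
The comment added in this hunk is the crux of the series: answering
HVMOP_get_mem_type must not perturb guest memory, and unsharing (or
populating) a page just to report its type would be a visible side
effect. A minimal sketch of the side-effect-free report, using the
predicates visible in the hunk; the wrapper is hypothetical, and the
HVMMEM_ram_ro/HVMMEM_ram_rw fallthrough is assumed from the public
HVMMEM_* interface since the hunk is truncated here:

    /* Hypothetical wrapper: map a gfn's current p2m type to an
     * HVMMEM_* value without allocating or unsharing anything. */
    static int report_mem_type(struct domain *d, unsigned long gfn)
    {
        p2m_type_t t;

        (void)get_gfn_query_unlocked(d, gfn, &t);

        if ( p2m_is_mmio(t) )
            return HVMMEM_mmio_dm;
        if ( p2m_is_readonly(t) )
            return HVMMEM_ram_ro;
        return HVMMEM_ram_rw;
    }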
diff -r d4336e35f0bc -r 2dc0ef05662e xen/arch/x86/hvm/stdvga.c
--- a/xen/arch/x86/hvm/stdvga.c
+++ b/xen/arch/x86/hvm/stdvga.c
@@ -482,15 +482,19 @@ static int mmio_move(struct hvm_hw_stdvg
                 if ( hvm_copy_to_guest_phys(data, &tmp, p->size) !=
                      HVMCOPY_okay )
                 {
-                    (void)get_gfn_unlocked(d, data >> PAGE_SHIFT, &p2mt);
+                    (void)get_gfn(d, data >> PAGE_SHIFT, &p2mt);
                     /*
                      * The only case we handle is vga_mem <-> vga_mem.
                      * Anything else disables caching and leaves it to qemu-dm.
                      */
                     if ( (p2mt != p2m_mmio_dm) || (data < VGA_MEM_BASE) ||
                          ((data + p->size) > (VGA_MEM_BASE + VGA_MEM_SIZE)) )
+                    {
+                        put_gfn(d, data >> PAGE_SHIFT);
                         return 0;
+                    }
                     stdvga_mem_write(data, tmp, p->size);
+                    put_gfn(d, data >> PAGE_SHIFT);
                 }
                 data += sign * p->size;
                 addr += sign * p->size;
@@ -504,11 +508,15 @@ static int mmio_move(struct hvm_hw_stdvg
                 if ( hvm_copy_from_guest_phys(&tmp, data, p->size) !=
                      HVMCOPY_okay )
                 {
-                    (void)get_gfn_unlocked(d, data >> PAGE_SHIFT, &p2mt);
+                    (void)get_gfn(d, data >> PAGE_SHIFT, &p2mt);
                     if ( (p2mt != p2m_mmio_dm) || (data < VGA_MEM_BASE) ||
                          ((data + p->size) > (VGA_MEM_BASE + VGA_MEM_SIZE)) )
+                    {
+                        put_gfn(d, data >> PAGE_SHIFT);
                         return 0;
+                    }
                     tmp = stdvga_mem_read(data, p->size);
+                    put_gfn(d, data >> PAGE_SHIFT);
                 }
                 stdvga_mem_write(addr, tmp, p->size);
                 data += sign * p->size;
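
Both stdvga hunks repeat the same shape: locked lookup, range check,
put_gfn on each exit path. One way the duplication could be factored
out is sketched below, purely as an illustration and not part of the
patch; note the caveat that the patch deliberately keeps the entry
held across the stdvga access, which this helper gives up:

    /* Hypothetical helper: return 1 if "data" lies in cacheable VGA
     * memory, 0 otherwise.  One lookup, one put_gfn, single exit.
     * Caveat: the p2m entry is dropped before the caller touches the
     * memory, whereas the hunks above hold it across
     * stdvga_mem_read()/stdvga_mem_write(). */
    static int vga_mem_range_ok(struct domain *d, paddr_t data,
                                unsigned int size)
    {
        p2m_type_t p2mt;
        int ok;

        (void)get_gfn(d, data >> PAGE_SHIFT, &p2mt);
        ok = (p2mt == p2m_mmio_dm) &&
             (data >= VGA_MEM_BASE) &&
             ((data + size) <= (VGA_MEM_BASE + VGA_MEM_SIZE));
        put_gfn(d, data >> PAGE_SHIFT);

        return ok;
    }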
diff -r d4336e35f0bc -r 2dc0ef05662e xen/arch/x86/hvm/vmx/vmx.c
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2112,7 +2112,7 @@ static void ept_handle_violation(unsigne
         return;
 
     /* Everything else is an error. */
-    mfn = get_gfn_guest_unlocked(d, gfn, &p2mt);
+    mfn = get_gfn_query_unlocked(d, gfn, &p2mt);
     gdprintk(XENLOG_ERR, "EPT violation %#lx (%c%c%c/%c%c%c), "
              "gpa %#"PRIpaddr", mfn %#lx, type %i.\n", 
              qualification, 
diff -r d4336e35f0bc -r 2dc0ef05662e xen/arch/x86/mm/mem_sharing.c
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -421,7 +421,7 @@ int mem_sharing_debug_gfn(struct domain 
     p2m_type_t p2mt;
     mfn_t mfn;
 
-    mfn = get_gfn_unlocked(d, gfn, &p2mt);
+    mfn = get_gfn_query_unlocked(d, gfn, &p2mt);
 
     gdprintk(XENLOG_DEBUG, "Debug for domain=%d, gfn=%lx, ", 
                d->domain_id, 

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-devel


 

