
[Xen-changelog] [xen master] x86/HVM: fix preemption handling in do_hvm_op() (try 2)



commit e8b87b57b4dfce2fd72b0e3d8319bf4e012f962c
Author:     Jan Beulich <JBeulich@xxxxxxxx>
AuthorDate: Wed Apr 2 09:09:00 2014 +0100
Commit:     Tim Deegan <tim@xxxxxxx>
CommitDate: Thu Apr 3 10:48:10 2014 +0100

    x86/HVM: fix preemption handling in do_hvm_op() (try 2)
    
    Just like previously done for some mem-op hypercalls, stop handling
    preemption by writing state back through the interface structure
    (altering it in ways the caller may not expect) and instead store
    the continuation point in the high bits of the sub-operation
    argument.
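    
    In outline, the scheme the hunks below implement looks like this (a
    sketch only; HVMOP_op_mask and start_iter are the names the patch
    itself introduces):
    
        #define HVMOP_op_mask 0xff
    
        /* On entry: the low byte selects the sub-op, the high bits carry
         * the iteration index at which a preempted call resumes. */
        unsigned long start_iter = op & ~HVMOP_op_mask;
        op &= HVMOP_op_mask;
    
        /* On preemption: fold the resume index back into the op argument.
         * Preemption is only permitted when the low byte of start_iter is
         * zero, so the OR cannot clobber the sub-op number. */
        rc = hypercall_create_continuation(__HYPERVISOR_hvm_op, "lh",
                                           op | start_iter, arg);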
    
    This also changes the "nr" fields of struct xen_hvm_track_dirty_vram
    (an operation already limited to 1GB worth of pages) and struct
    xen_hvm_modified_memory to be only 32 bits wide, consistent with
    those of struct xen_hvm_set_mem{type,access}. If that's not
    acceptable for some reason, we'd need to shrink HVMOP_op_bits (while
    still enforcing the [then higher] limit resulting from the need to
    be able to encode the continuation).
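    
    (For scale: 1GB of 4KiB pages is 2^18 = 262144 pages, far below the
    2^32 - 1 ceiling of a uint32_t "nr", so the narrower field costs
    that operation nothing.)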
    
    Whether (and if so, how) to adjust xc_hvm_track_dirty_vram(),
    xc_hvm_modified_memory(), xc_hvm_set_mem_type(), and
    xc_hvm_set_mem_access() to reflect the 32-bit restriction on "nr"
    is unclear: if the APIs need to remain stable, all four functions
    should probably check that no truncation occurred. Preferably,
    though, their parameters would be changed to uint32_t or unsigned
    int.
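    
    A minimal sketch of the truncation check suggested above
    (hypothetical wrapper code, not part of this patch; the parameter
    name "nr" and the errno convention are assumptions):
    
        /* In a stable-API wrapper such as xc_hvm_modified_memory(),
         * reject page counts that no longer fit the 32-bit "nr" field. */
        if ( nr != (uint32_t)nr )
        {
            errno = EINVAL;
            return -1;
        }
        arg.nr = nr;   /* now known to fit in 32 bits */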
    
    Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
    Acked-by: Tim Deegan <tim@xxxxxxx>
---
 xen/arch/x86/hvm/hvm.c          |   62 ++++++++++++++++++--------------------
 xen/arch/x86/mm/p2m.c           |   13 +++-----
 xen/include/asm-x86/p2m.h       |    4 +-
 xen/include/public/hvm/hvm_op.h |    8 ++--
 4 files changed, 40 insertions(+), 47 deletions(-)

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 5e89cf5..38c491e 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4072,13 +4072,16 @@ static int hvm_replace_event_channel(struct vcpu *v, domid_t remote_domid,
     return 0;
 }
 
+#define HVMOP_op_mask 0xff
+
 long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
 {
     struct domain *curr_d = current->domain;
+    unsigned long start_iter = op & ~HVMOP_op_mask;
     long rc = 0;
 
-    switch ( op )
+    switch ( op &= HVMOP_op_mask )
     {
     case HVMOP_set_param:
     case HVMOP_get_param:
@@ -4408,7 +4411,8 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
             goto param_fail3;
 
         rc = -EINVAL;
-        if ( ((a.first_pfn + a.nr - 1) < a.first_pfn) ||
+        if ( a.nr < start_iter ||
+             ((a.first_pfn + a.nr - 1) < a.first_pfn) ||
              ((a.first_pfn + a.nr - 1) > domain_get_maximum_gpfn(d)) )
             goto param_fail3;
 
@@ -4416,9 +4420,9 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
         if ( !paging_mode_log_dirty(d) )
             goto param_fail3;
 
-        while ( a.nr > 0 )
+        while ( a.nr > start_iter )
         {
-            unsigned long pfn = a.first_pfn;
+            unsigned long pfn = a.first_pfn + start_iter;
             struct page_info *page;
 
             page = get_page_from_gfn(d, pfn, NULL, P2M_UNSHARE);
@@ -4431,16 +4435,11 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
                 put_page(page);
             }
 
-            a.first_pfn++;
-            a.nr--;
-
             /* Check for continuation if it's not the last iteration */
-            if ( a.nr > 0 && hypercall_preempt_check() )
+            if ( a.nr > ++start_iter && !(start_iter & HVMOP_op_mask) &&
+                 hypercall_preempt_check() )
             {
-                if ( __copy_to_guest(arg, &a, 1) )
-                    rc = -EFAULT;
-                else
-                    rc = -EAGAIN;
+                rc = -EAGAIN;
                 break;
             }
         }
@@ -4522,16 +4521,17 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
             goto param_fail4;
 
         rc = -EINVAL;
-        if ( ((a.first_pfn + a.nr - 1) < a.first_pfn) ||
+        if ( a.nr < start_iter ||
+             ((a.first_pfn + a.nr - 1) < a.first_pfn) ||
              ((a.first_pfn + a.nr - 1) > domain_get_maximum_gpfn(d)) )
             goto param_fail4;
             
         if ( a.hvmmem_type >= ARRAY_SIZE(memtype) )
             goto param_fail4;
 
-        while ( a.nr )
+        while ( a.nr > start_iter )
         {
-            unsigned long pfn = a.first_pfn;
+            unsigned long pfn = a.first_pfn + start_iter;
             p2m_type_t t;
             p2m_type_t nt;
             mfn_t mfn;
@@ -4572,16 +4572,11 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
             }
             put_gfn(d, pfn);
 
-            a.first_pfn++;
-            a.nr--;
-
             /* Check for continuation if it's not the last iteration */
-            if ( a.nr > 0 && hypercall_preempt_check() )
+            if ( a.nr > ++start_iter && !(start_iter & HVMOP_op_mask) &&
+                 hypercall_preempt_check() )
             {
-                if ( __copy_to_guest(arg, &a, 1) )
-                    rc = -EFAULT;
-                else
-                    rc = -EAGAIN;
+                rc = -EAGAIN;
                 goto param_fail4;
             }
         }
@@ -4615,19 +4610,17 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
 
         rc = -EINVAL;
         if ( (a.first_pfn != ~0ull) &&
-             (((a.first_pfn + a.nr - 1) < a.first_pfn) ||
+             (a.nr < start_iter ||
+              ((a.first_pfn + a.nr - 1) < a.first_pfn) ||
               ((a.first_pfn + a.nr - 1) > domain_get_maximum_gpfn(d))) )
             goto param_fail5;
             
-        rc = p2m_set_mem_access(d, a.first_pfn, a.nr, a.hvmmem_access);
+        rc = p2m_set_mem_access(d, a.first_pfn, a.nr, start_iter,
+                                HVMOP_op_mask, a.hvmmem_access);
         if ( rc > 0 )
         {
-            a.first_pfn += a.nr - rc;
-            a.nr = rc;
-            if ( __copy_to_guest(arg, &a, 1) )
-                rc = -EFAULT;
-            else
-                rc = -EAGAIN;
+            start_iter = rc;
+            rc = -EAGAIN;
         }
 
     param_fail5:
@@ -4776,8 +4769,11 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE_PARAM(void) arg)
     }
 
     if ( rc == -EAGAIN )
-        rc = hypercall_create_continuation(
-            __HYPERVISOR_hvm_op, "lh", op, arg);
+    {
+        ASSERT(!(start_iter & HVMOP_op_mask));
+        rc = hypercall_create_continuation(__HYPERVISOR_hvm_op, "lh",
+                                           op | start_iter, arg);
+    }
 
     return rc;
 }
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 13d5e76..fcc3ed6 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1350,13 +1350,13 @@ void p2m_mem_access_resume(struct domain *d)
 /* Set access type for a region of pfns.
  * If start_pfn == -1ul, sets the default access type */
 long p2m_set_mem_access(struct domain *d, unsigned long pfn, uint32_t nr,
-                        hvmmem_access_t access)
+                        uint32_t start, uint32_t mask, hvmmem_access_t access)
 {
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
     p2m_access_t a, _a;
     p2m_type_t t;
     mfn_t mfn;
-    long rc;
+    long rc = 0;
 
     /* N.B. _not_ static: initializer depends on p2m->default_access */
     p2m_access_t memaccess[] = {
@@ -1385,11 +1385,8 @@ long p2m_set_mem_access(struct domain *d, unsigned long pfn, uint32_t nr,
         return 0;
     }
 
-    if ( !nr )
-        return 0;
-
     p2m_lock(p2m);
-    for ( ; ; ++pfn )
+    for ( pfn += start; nr > start; ++pfn )
     {
         mfn = p2m->get_entry(p2m, pfn, &t, &_a, 0, NULL);
         if ( p2m->set_entry(p2m, pfn, mfn, PAGE_ORDER_4K, t, a) == 0 )
@@ -1399,9 +1396,9 @@ long p2m_set_mem_access(struct domain *d, unsigned long pfn, uint32_t nr,
         }
 
         /* Check for continuation if it's not the last iteration. */
-        if ( !--nr || hypercall_preempt_check() )
+        if ( nr > ++start && !(start & mask) && hypercall_preempt_check() )
         {
-            rc = nr;
+            rc = start;
             break;
         }
     }
diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h
index a2cb1b7..d644f82 100644
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -576,8 +576,8 @@ void p2m_mem_access_resume(struct domain *d);
 
 /* Set access type for a region of pfns.
  * If start_pfn == -1ul, sets the default access type */
-long p2m_set_mem_access(struct domain *d, unsigned long start_pfn,
-                        uint32_t nr, hvmmem_access_t access);
+long p2m_set_mem_access(struct domain *d, unsigned long start_pfn, uint32_t nr,
+                        uint32_t start, uint32_t mask, hvmmem_access_t access);
 
 /* Get access type for a pfn
  * If pfn == -1ul, gets the default access type */
diff --git a/xen/include/public/hvm/hvm_op.h b/xen/include/public/hvm/hvm_op.h
index a9aab4b..3204ec4 100644
--- a/xen/include/public/hvm/hvm_op.h
+++ b/xen/include/public/hvm/hvm_op.h
@@ -90,10 +90,10 @@ typedef enum {
 struct xen_hvm_track_dirty_vram {
     /* Domain to be tracked. */
     domid_t  domid;
+    /* Number of pages to track. */
+    uint32_t nr;
     /* First pfn to track. */
     uint64_aligned_t first_pfn;
-    /* Number of pages to track. */
-    uint64_aligned_t nr;
     /* OUT variable. */
     /* Dirty bitmap buffer. */
     XEN_GUEST_HANDLE_64(uint8) dirty_bitmap;
@@ -106,10 +106,10 @@ DEFINE_XEN_GUEST_HANDLE(xen_hvm_track_dirty_vram_t);
 struct xen_hvm_modified_memory {
     /* Domain to be updated. */
     domid_t  domid;
+    /* Number of pages. */
+    uint32_t nr;
     /* First pfn. */
     uint64_aligned_t first_pfn;
-    /* Number of pages. */
-    uint64_aligned_t nr;
 };
 typedef struct xen_hvm_modified_memory xen_hvm_modified_memory_t;
 DEFINE_XEN_GUEST_HANDLE(xen_hvm_modified_memory_t);
--
generated by git-patchbot for /home/xen/git/xen.git#master
