Re: [Xen-devel] [PATCH 0 of 2] Add libxc API that sets mem_access type for an array of gfns



On Fri, Apr 27, 2012 at 11:25 AM, Aravindh Puthiyaparambil
<aravindh@xxxxxxxxxxxx> wrote:
> On Fri, Apr 27, 2012 at 10:37 AM, Christian Limpach
> <christian.limpach@xxxxxxxxx> wrote:
>> On Thu, Apr 26, 2012 at 6:36 PM, Aravindh Puthiyaparambil
>> <aravindh@xxxxxxxxxxxx> wrote:
>>>
>>> On Apr 26, 2012 6:06 PM, "Christian Limpach" <christian.limpach@xxxxxxxxx>
>>> wrote:
>>>>
>>>> Maybe you can do something similar, for example passing in a hint
>>>> whether ept_sync_domain needs to be done or not.  In my case, the
>>>> reasoning is that all the entries changed from hap_clean_vram_tracking
>>>> are leaf entries, so ept_free_entry will never be called and thus
>>>> ept_sync_domain can be deferred.  I didn't think through/consider the
>>>> iommu case since that code is commented out in my tree.
>>>
>>> I thought about doing that initially. But then in the bulk case I would
>>> always have to call ept_sync_domain() to be on the safe side. But if the
>>> iommu case forces me down that path, then I guess I have no choice.
>>
>> I think you should re-consider.  It would avoid adding the extra
>> wrapper, which makes this code a lot less readable.  Plus it avoids
>> the need for the old_entries array.
>>
>> Let me re-iterate:
>> - if it's a leaf entry, then there's no need to call ept_free_entry
>> - if you don't need to call ept_free_entry, then you can defer
>> ept_sync_domain (subject to the iommu case)
>> - you could pass in a pointer to a bool to indicate to the caller that
>> ept_sync_domain was deferred and that the caller needs to do it, with
>> a NULL pointer indicating that the caller doesn't support defer
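
In outline, the convention described above, as a runnable userspace sketch
(every name below is a stand-in I made up to illustrate the idea; the real
signatures are in the patches that follow):

    #include <stdio.h>

    typedef int bool_t;                        /* stand-in for Xen's bool_t */

    static int flushes;
    static void sync_domain(void) { flushes++; } /* plays ept_sync_domain() */

    /* The setter takes an optional out-parameter: a NULL pointer means the
     * caller cannot defer, so the setter must flush for itself. */
    static int set_entry_sketch(int old_entry_present, bool_t *sync_deferred)
    {
        bool_t needs_sync = old_entry_present; /* no old entry: nothing cached */

        /* ... a leaf entry is written here; leaf entries never require
         * ept_free_entry(), which is what makes deferring safe ... */

        if ( sync_deferred )
        {
            *sync_deferred |= needs_sync;      /* caller flushes once, later */
            return 1;
        }
        if ( needs_sync )
            sync_domain();
        return 1;
    }

    int main(void)
    {
        bool_t deferred = 0;
        int i;

        for ( i = 0; i < 1000; i++ )           /* bulk path: defer the flush */
            set_entry_sketch(1, &deferred);
        if ( deferred )
            sync_domain();                     /* one flush for the batch */

        set_entry_sketch(1, NULL);             /* single-update path */
        printf("flushes: %d\n", flushes);      /* prints 2, not 1001 */
        return 0;
    }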

How does this look?

changeset:   25257:2c05bdb052ea
user:        Aravindh Puthiyaparambil <aravindh@xxxxxxxxxxxx>
date:        Fri Apr 27 20:28:37 2012 -0700
summary:     x86/mm: Add sync deferred option to p2m->set_entry()

diff -r 9dda0efd8ce1 -r 2c05bdb052ea xen/arch/x86/mm/mem_sharing.c
--- a/xen/arch/x86/mm/mem_sharing.c     Fri Apr 27 17:57:55 2012 +0200
+++ b/xen/arch/x86/mm/mem_sharing.c     Fri Apr 27 20:28:37 2012 -0700
@@ -1272,7 +1272,7 @@ int relinquish_shared_pages(struct domai
             /* Clear out the p2m entry so no one else may try to
              * unshare */
             p2m->set_entry(p2m, gfn, _mfn(0), PAGE_ORDER_4K,
-                            p2m_invalid, p2m_access_rwx);
+                            p2m_invalid, p2m_access_rwx, NULL);
             count++;
         }

diff -r 9dda0efd8ce1 -r 2c05bdb052ea xen/arch/x86/mm/p2m-ept.c
--- a/xen/arch/x86/mm/p2m-ept.c Fri Apr 27 17:57:55 2012 +0200
+++ b/xen/arch/x86/mm/p2m-ept.c Fri Apr 27 20:28:37 2012 -0700
@@ -274,10 +274,13 @@ static int ept_next_level(struct p2m_dom
 /*
  * ept_set_entry() computes 'need_modify_vtd_table' for itself,
  * by observing whether any gfn->mfn translations are modified.
+ * If sync_deferred is not NULL, then the caller will take care of
+ * calling ept_sync_domain() in the cases where it can be deferred.
  */
 static int
 ept_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
-              unsigned int order, p2m_type_t p2mt, p2m_access_t p2ma)
+              unsigned int order, p2m_type_t p2mt, p2m_access_t p2ma,
+              bool_t *sync_deferred)
 {
     ept_entry_t *table, *ept_entry = NULL;
     unsigned long gfn_remainder = gfn;
@@ -293,6 +296,7 @@ ept_set_entry(struct p2m_domain *p2m, un
     int needs_sync = 1;
     struct domain *d = p2m->domain;
     ept_entry_t old_entry = { .epte = 0 };
+    bool_t _sync_deferred = 0;

     /*
      * the caller must make sure:
@@ -309,6 +313,9 @@ ept_set_entry(struct p2m_domain *p2m, un
            (target == 1 && hvm_hap_has_2mb(d)) ||
            (target == 0));

+    if ( sync_deferred )
+        *sync_deferred = 1;
+
     table = map_domain_page(ept_get_asr(d));

     for ( i = ept_get_wl(d); i > target; i-- )
@@ -346,7 +353,11 @@ ept_set_entry(struct p2m_domain *p2m, un

         /* No need to flush if the old entry wasn't valid */
         if ( !is_epte_present(ept_entry) )
+        {
             needs_sync = 0;
+            if ( sync_deferred )
+                *sync_deferred = 0;
+        }

         /* If we're replacing a non-leaf entry with a leaf entry (1GiB or 2MiB),
          * the intermediate tables will be freed below after the ept flush
@@ -385,6 +396,9 @@ ept_set_entry(struct p2m_domain *p2m, un

         ASSERT(is_epte_superpage(ept_entry));

+        if ( sync_deferred )
+            _sync_deferred = 1;
+
         split_ept_entry = atomic_read_ept_entry(ept_entry);

         if ( !ept_split_super_page(p2m, &split_ept_entry, i, target) )
@@ -438,7 +452,7 @@ ept_set_entry(struct p2m_domain *p2m, un
 out:
     unmap_domain_page(table);

-    if ( needs_sync )
+    if ( needs_sync && !_sync_deferred )
         ept_sync_domain(p2m->domain);

     if ( rv && iommu_enabled && need_iommu(p2m->domain) && need_modify_vtd_table )
@@ -727,7 +741,8 @@ void ept_change_entry_emt_with_range(str
                     order = level * EPT_TABLE_ORDER;
                     if ( need_modify_ept_entry(p2m, gfn, mfn,
                           e.ipat, e.emt, e.sa_p2mt) )
-                        ept_set_entry(p2m, gfn, mfn, order, e.sa_p2mt, e.access);
+                        ept_set_entry(p2m, gfn, mfn, order, e.sa_p2mt, e.access,
+                                      NULL);
                     gfn += trunk;
                     break;
                 }
@@ -737,7 +752,7 @@ void ept_change_entry_emt_with_range(str
         else /* gfn assigned with 4k */
         {
             if ( need_modify_ept_entry(p2m, gfn, mfn, e.ipat, e.emt, e.sa_p2mt) )
-                ept_set_entry(p2m, gfn, mfn, order, e.sa_p2mt, e.access);
+                ept_set_entry(p2m, gfn, mfn, order, e.sa_p2mt, e.access, NULL);
         }
     }
     p2m_unlock(p2m);
diff -r 9dda0efd8ce1 -r 2c05bdb052ea xen/arch/x86/mm/p2m-pt.c
--- a/xen/arch/x86/mm/p2m-pt.c  Fri Apr 27 17:57:55 2012 +0200
+++ b/xen/arch/x86/mm/p2m-pt.c  Fri Apr 27 20:28:37 2012 -0700
@@ -291,7 +291,8 @@ p2m_next_level(struct p2m_domain *p2m, m
 // Returns 0 on error (out of memory)
 static int
 p2m_set_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
-              unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma)
+              unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma,
+              bool_t *unused)
 {
     // XXX -- this might be able to be faster iff current->domain == d
     mfn_t table_mfn = pagetable_get_mfn(p2m_get_pagetable(p2m));
diff -r 9dda0efd8ce1 -r 2c05bdb052ea xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Fri Apr 27 17:57:55 2012 +0200
+++ b/xen/arch/x86/mm/p2m.c     Fri Apr 27 20:28:37 2012 -0700
@@ -227,7 +227,7 @@ int set_p2m_entry(struct p2m_domain *p2m
         else
             order = 0;

-        if ( !p2m->set_entry(p2m, gfn, mfn, order, p2mt, p2ma) )
+        if ( !p2m->set_entry(p2m, gfn, mfn, order, p2mt, p2ma, NULL) )
             rc = 0;
         gfn += 1ul << order;
         if ( mfn_x(mfn) != INVALID_MFN )
@@ -1199,14 +1199,14 @@ bool_t p2m_mem_access_check(unsigned lon

     if ( access_w && p2ma == p2m_access_rx2rw )
     {
-        p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2m_access_rw);
+        p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2m_access_rw, NULL);
         gfn_unlock(p2m, gfn, 0);
         return 1;
     }
     else if ( p2ma == p2m_access_n2rwx )
     {
         ASSERT(access_w || access_r || access_x);
-        p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2m_access_rwx);
+        p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2m_access_rwx, NULL);
     }
     gfn_unlock(p2m, gfn, 0);

@@ -1228,7 +1228,8 @@ bool_t p2m_mem_access_check(unsigned lon
             {
                 /* A listener is not required, so clear the access restrictions */
                 gfn_lock(p2m, gfn, 0);
-                p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt, p2m_access_rwx);
+                p2m->set_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2mt,
+                               p2m_access_rwx, NULL);
                 gfn_unlock(p2m, gfn, 0);
             }
             return 1;
@@ -1292,6 +1293,7 @@ int p2m_set_mem_access(struct domain *d,
     p2m_type_t t;
     mfn_t mfn;
     int rc = 0;
+    bool_t sync_deferred = 1;

     /* N.B. _not_ static: initializer depends on p2m->default_access */
     p2m_access_t memaccess[] = {
@@ -1324,12 +1326,17 @@ int p2m_set_mem_access(struct domain *d,
     for ( pfn = start_pfn; pfn < start_pfn + nr; pfn++ )
     {
         mfn = p2m->get_entry(p2m, pfn, &t, &_a, 0, NULL);
-        if ( p2m->set_entry(p2m, pfn, mfn, PAGE_ORDER_4K, t, a) == 0 )
+        if ( p2m->set_entry(p2m, pfn, mfn, PAGE_ORDER_4K, t, a, &sync_deferred)
+             == 0 )
         {
             rc = -ENOMEM;
             break;
         }
     }
+
+    if ( sync_deferred )
+        ept_sync_domain(p2m->domain);
+
     p2m_unlock(p2m);
     return rc;
 }
diff -r 9dda0efd8ce1 -r 2c05bdb052ea xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h Fri Apr 27 17:57:55 2012 +0200
+++ b/xen/include/asm-x86/p2m.h Fri Apr 27 20:28:37 2012 -0700
@@ -231,7 +231,8 @@ struct p2m_domain {
                                        unsigned long gfn,
                                        mfn_t mfn, unsigned int page_order,
                                        p2m_type_t p2mt,
-                                       p2m_access_t p2ma);
+                                       p2m_access_t p2ma,
+                                       bool_t *sync_deferred);
     mfn_t              (*get_entry   )(struct p2m_domain *p2m,
                                        unsigned long gfn,
                                        p2m_type_t *p2mt,

changeset:   25258:5a0d60bb536b
user:        Aravindh Puthiyaparambil <aravindh@xxxxxxxxxxxx>
date:        Fri Apr 27 21:10:59 2012 -0700
summary:     mem_access: Add xc_hvm_mem_access_bulk() API

diff -r 2c05bdb052ea -r 5a0d60bb536b tools/libxc/xc_misc.c
--- a/tools/libxc/xc_misc.c     Fri Apr 27 20:28:37 2012 -0700
+++ b/tools/libxc/xc_misc.c     Fri Apr 27 21:10:59 2012 -0700
@@ -570,6 +570,57 @@ int xc_hvm_set_mem_access(
     return rc;
 }

+int xc_hvm_set_mem_access_bulk(
+    xc_interface *xch, domid_t dom, hvmmem_access_t mem_access,
+    xen_pfn_t *arr, int *err, uint64_t nr)
+{
+    DECLARE_HYPERCALL;
+    DECLARE_HYPERCALL_BUFFER(struct xen_hvm_set_mem_access_bulk, arg);
+    DECLARE_HYPERCALL_BOUNCE(arr, sizeof(xen_pfn_t) * nr,
+                             XC_HYPERCALL_BUFFER_BOUNCE_IN);
+    DECLARE_HYPERCALL_BOUNCE(err, sizeof(int) * nr,
+                             XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+    int rc;
+
+    arg = xc_hypercall_buffer_alloc(xch, arg, sizeof(*arg));
+    if ( arg == NULL )
+    {
+        PERROR("Could not allocate memory for
xc_hvm_set_mem_access_bulk hypercall");
+        return -1;
+    }
+
+    if ( xc_hypercall_bounce_pre(xch, arr) ) {
+        PERROR("Could not bounce arr for xc_hvm_set_mem_access_bulk
hypercall");
+        rc = -1;
+        goto out;
+    }
+
+    if ( xc_hypercall_bounce_pre(xch, err) ) {
+        PERROR("Could not bounce err for xc_hvm_set_mem_access_bulk
hypercall");
+        rc = -1;
+        goto out;
+    }
+
+    arg->domid         = dom;
+    arg->hvmmem_access = mem_access;
+    arg->nr            = nr;
+    set_xen_guest_handle(arg->arr, arr);
+    set_xen_guest_handle(arg->err, err);
+
+    hypercall.op     = __HYPERVISOR_hvm_op;
+    hypercall.arg[0] = HVMOP_set_mem_access_bulk;
+    hypercall.arg[1] = HYPERCALL_BUFFER_AS_ARG(arg);
+
+    rc = do_xen_hypercall(xch, &hypercall);
+
+out:
+    xc_hypercall_buffer_free(xch, arg);
+    xc_hypercall_bounce_post(xch, arr);
+    xc_hypercall_bounce_post(xch, err);
+
+    return rc;
+}
+
 int xc_hvm_get_mem_access(
     xc_interface *xch, domid_t dom, uint64_t pfn, hvmmem_access_t* mem_access)
 {
diff -r 2c05bdb052ea -r 5a0d60bb536b tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h     Fri Apr 27 20:28:37 2012 -0700
+++ b/tools/libxc/xenctrl.h     Fri Apr 27 21:10:59 2012 -0700
@@ -1568,6 +1568,17 @@ int xc_hvm_set_mem_access(
     xc_interface *xch, domid_t dom, hvmmem_access_t memaccess, uint64_t first_pfn, uint64_t nr);

 /*
+ * Set an array of pfns to a specific access type.
+ * When a pfn cannot be set to the specified access, its respective field in
+ * @err is set to the corresponding errno value.
+ * Allowed types are HVMMEM_access_default, HVMMEM_access_n, any combination of
+ * HVM_access_ + (rwx), and HVM_access_rx2rw
+ */
+int xc_hvm_set_mem_access_bulk(
+    xc_interface *xch, domid_t dom, hvmmem_access_t memaccess,
+    xen_pfn_t *arr, int *err, uint64_t nr);
+
+/*
  * Gets the mem access for the given page (returned in memacess on success)
  */
 int xc_hvm_get_mem_access(
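
To make the intended use concrete, a client would call it along these lines
(a sketch of mine, not part of the patch; revoke_writes() and its error
handling are made up, the libxc call itself is as declared above):

    #include <stdio.h>
    #include <stdlib.h>
    #include <xenctrl.h>

    /* Make every pfn in 'pfns' read/execute-only, reporting per-pfn
     * failures.  Per-pfn errno values land in err[]. */
    static int revoke_writes(xc_interface *xch, domid_t dom,
                             xen_pfn_t *pfns, uint64_t nr)
    {
        int *err = calloc(nr, sizeof(*err));
        uint64_t i;
        int rc;

        if ( !err )
            return -1;

        rc = xc_hvm_set_mem_access_bulk(xch, dom, HVMMEM_access_rx,
                                        pfns, err, nr);
        for ( i = 0; i < nr; i++ )
            if ( err[i] )
                fprintf(stderr, "pfn %#lx not updated: %d\n",
                        (unsigned long)pfns[i], err[i]);
        free(err);
        return rc;
    }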
diff -r 2c05bdb052ea -r 5a0d60bb536b xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c    Fri Apr 27 20:28:37 2012 -0700
+++ b/xen/arch/x86/hvm/hvm.c    Fri Apr 27 21:10:59 2012 -0700
@@ -4197,6 +4197,51 @@ long do_hvm_op(unsigned long op, XEN_GUE
         break;
     }

+    case HVMOP_set_mem_access_bulk:
+    {
+        struct xen_hvm_set_mem_access_bulk a;
+        struct domain *d;
+        xen_pfn_t *arr = 0;
+        int *err = 0;
+
+        if ( copy_from_guest(&a, arg, 1) )
+            return -EFAULT;
+
+        rc = rcu_lock_remote_target_domain_by_id(a.domid, &d);
+        if ( rc != 0 )
+            return rc;
+
+        rc = -EINVAL;
+
+        if ( !is_hvm_domain(d) )
+            goto param_fail9;
+
+        rc = -ENOMEM;
+        arr = xmalloc_array(xen_pfn_t, a.nr);
+        if ( !arr )
+            goto param_fail9;
+
+        err = xmalloc_array(int, a.nr);
+        if ( !err )
+            goto param_fail9;
+
+        rc = -EFAULT;
+        if ( copy_from_guest(arr, a.arr, a.nr) )
+            goto param_fail9;
+
+        rc = p2m_set_access_bulk(d, arr, err, a.nr, a.hvmmem_access);
+        if ( copy_to_guest(a.err, err, a.nr) )
+            rc = -EFAULT;
+
+    param_fail9:
+        rcu_unlock_domain(d);
+        if ( arr )
+            xfree(arr);
+        if ( err )
+            xfree(err);
+        break;
+    }
+
     case HVMOP_get_mem_access:
     {
         struct xen_hvm_get_mem_access a;
diff -r 2c05bdb052ea -r 5a0d60bb536b xen/arch/x86/mm/p2m.c
--- a/xen/arch/x86/mm/p2m.c     Fri Apr 27 20:28:37 2012 -0700
+++ b/xen/arch/x86/mm/p2m.c     Fri Apr 27 21:10:59 2012 -0700
@@ -1341,6 +1341,61 @@ int p2m_set_mem_access(struct domain *d,
     return rc;
 }

+int p2m_set_access_bulk(struct domain *d, xen_pfn_t *arr, int *err,
+                        uint64_t nr, hvmmem_access_t access)
+{
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
+    unsigned long pfn;
+    p2m_access_t a, _a;
+    p2m_type_t t;
+    p2m_access_t memaccess[] = {
+        p2m_access_n,
+        p2m_access_r,
+        p2m_access_w,
+        p2m_access_rw,
+        p2m_access_x,
+        p2m_access_rx,
+        p2m_access_wx,
+        p2m_access_rwx,
+        p2m_access_rx2rw,
+        p2m_access_n2rwx,
+        p2m->default_access,
+    };
+    mfn_t mfn;
+    int rc;
+    bool_t sync_deferred = 1;
+
+    if ( (unsigned) access >= HVMMEM_access_default )
+        return -EINVAL;
+
+    a = memaccess[access];
+
+    p2m_lock(p2m);
+
+    for ( pfn = 0; pfn < nr; pfn++ )
+    {
+        err[pfn] = 0;
+        if ( arr[pfn] > domain_get_maximum_gpfn(d) )
+        {
+            err[pfn] = -EINVAL;
+            continue;
+        }
+
+        mfn = p2m->get_entry(p2m, arr[pfn], &t, &_a, 0, NULL);
+        rc = p2m->set_entry(p2m, arr[pfn], mfn, PAGE_ORDER_4K, t, a,
+                            &sync_deferred);
+        if ( rc == 0 )
+            err[pfn] = -ENOMEM;
+    }
+
+    if ( sync_deferred )
+        ept_sync_domain(p2m->domain);
+
+    p2m_unlock(p2m);
+
+    return 0;
+}
+
 /* Get access type for a pfn
  * If pfn == -1ul, gets the default access type */
 int p2m_get_mem_access(struct domain *d, unsigned long pfn,
diff -r 2c05bdb052ea -r 5a0d60bb536b xen/include/asm-x86/p2m.h
--- a/xen/include/asm-x86/p2m.h Fri Apr 27 20:28:37 2012 -0700
+++ b/xen/include/asm-x86/p2m.h Fri Apr 27 21:10:59 2012 -0700
@@ -574,6 +574,11 @@ void p2m_mem_access_resume(struct domain
 int p2m_set_mem_access(struct domain *d, unsigned long start_pfn,
                        uint32_t nr, hvmmem_access_t access);

+/* Set access type for an array of pfns. Per-pfn success or failure is
+ * returned in the err array. */
+int p2m_set_access_bulk(struct domain *d, xen_pfn_t *arr, int *err,
+                        uint64_t nr, hvmmem_access_t access);
+
 /* Get access type for a pfn
  * If pfn == -1ul, gets the default access type */
 int p2m_get_mem_access(struct domain *d, unsigned long pfn,
@@ -589,7 +594,11 @@ static inline int p2m_set_mem_access(str
                                      unsigned long start_pfn,
                                      uint32_t nr, hvmmem_access_t access)
 { return -EINVAL; }
-static inline int p2m_get_mem_access(struct domain *d, unsigned long pfn,
+static inline int p2m_set_access_bulk(struct domain *d, xen_pfn_t *arr,
+                                      int *err, uint64_t nr,
+                                      hvmmem_access_t access)
+{ return -EINVAL; }
+static inline int p2m_get_mem_access(struct domain *d, unsigned long pfn,
                                      hvmmem_access_t *access)
 { return -EINVAL; }
 #endif
diff -r 2c05bdb052ea -r 5a0d60bb536b xen/include/public/hvm/hvm_op.h
--- a/xen/include/public/hvm/hvm_op.h   Fri Apr 27 20:28:37 2012 -0700
+++ b/xen/include/public/hvm/hvm_op.h   Fri Apr 27 21:10:59 2012 -0700
@@ -261,4 +261,22 @@ DEFINE_XEN_GUEST_HANDLE(xen_hvm_inject_m

 #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */

+#if defined(__XEN__) || defined(__XEN_TOOLS__)
+#define HVMOP_set_mem_access_bulk      17
+/* Notify that an array of pfns is to have specific access types */
+struct xen_hvm_set_mem_access_bulk {
+    /* Domain to be updated. */
+    domid_t domid;
+    /* Memory type */
+    uint16_t hvmmem_access; /* hvm_access_t */
+    /* Array of pfns */
+    XEN_GUEST_HANDLE_64(xen_pfn_t) arr;
+    XEN_GUEST_HANDLE_64(int) err; /* Array of per-pfn error codes */
+    /* Number of entries */
+    uint64_t nr;
+};
+typedef struct xen_hvm_set_mem_access_bulk xen_hvm_set_mem_access_bulk_t;
+DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_mem_access_bulk_t);
+#endif
+
 #endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */
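
For completeness, the per-pfn error contract as intended (my wording, not
spelled out in the headers): after the hypercall returns 0, err[i] is 0 for
pfns whose access was updated, -EINVAL for pfns above the domain's maximum
gpfn, and -ENOMEM where the p2m update itself failed, so a caller can retry
selectively.  Continuing the revoke_writes() sketch from above:

    /* Retry only the pfns that failed with -ENOMEM (sketch). */
    uint64_t j, pending = 0;
    for ( j = 0; j < nr; j++ )
        if ( err[j] == -ENOMEM )
            pfns[pending++] = pfns[j];
    if ( pending )
        rc = xc_hvm_set_mem_access_bulk(xch, dom, HVMMEM_access_rx,
                                        pfns, err, pending);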
