
[Xen-devel] [PATCH v3 3/3] xen: introduce XENMEM_get_dma_buf and XENMEM_put_dma_buf



Introduce two new hypercalls XENMEM_get_dma_buf and XENMEM_put_dma_buf.

XENMEM_get_dma_buf exchanges a set of pages for a new set, contiguous
and under 4G if so requested. The new pages are "pinned": their p2m
mappings are guaranteed not to change until XENMEM_put_dma_buf is
called. XENMEM_get_dma_buf returns the MFNs of the new pages to the
caller.
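
For illustration, a caller could drive XENMEM_get_dma_buf much like
XENMEM_exchange. A minimal guest-side sketch, assuming the usual
Linux-style helpers (HYPERVISOR_memory_op, set_xen_guest_handle) and
Linux's spelling of the reservation fields; the function name and the
choice of a single order-sized output extent are illustrative only:

    /* Trade the 1UL << order frames listed in gpfns_in[] for one
     * machine-contiguous, pinned extent below 4G.  On entry *gmfn is
     * the GPFN base to repopulate; on success it holds the MFN base
     * of the new buffer. */
    static int get_dma_buf(xen_pfn_t *gpfns_in, unsigned int order,
                           xen_pfn_t *gmfn)
    {
        struct xen_get_dma_buf get = {
            .in = {
                .nr_extents   = 1UL << order,
                .extent_order = 0,
                .domid        = DOMID_SELF,
            },
            .out = {
                .nr_extents   = 1,
                .extent_order = order,
                .address_bits = 32,    /* contiguous and under 4G */
                .domid        = DOMID_SELF,
            },
            .nr_exchanged = 0,         /* must be zero on entry */
        };

        /* The in and out lists must not overlap. */
        set_xen_guest_handle(get.in.extent_start, gpfns_in);
        set_xen_guest_handle(get.out.extent_start, gmfn);

        return HYPERVISOR_memory_op(XENMEM_get_dma_buf, &get);
    }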

The only effect of XENMEM_put_dma_buf is to "unpin" the previously
pinned pages. Afterwards their p2m mappings can once again be changed
transparently by the hypervisor, as usual. The memory remains
accessible from the guest.
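
The release side is symmetric. A matching sketch under the same
assumptions (gpfn points at the GPFN base returned above):

    /* Unpin the single order-sized extent obtained above.  Afterwards
     * Xen may change its p2m mappings again as usual. */
    static int put_dma_buf(xen_pfn_t *gpfn, unsigned int order)
    {
        struct xen_put_dma_buf put = {
            .in = {
                .nr_extents   = 1,
                .extent_order = order,
                .domid        = DOMID_SELF,
            },
        };

        set_xen_guest_handle(put.in.extent_start, gpfn);

        return HYPERVISOR_memory_op(XENMEM_put_dma_buf, &put);
    }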

Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
---
 xen/common/memory.c         |   73 +++++++++++++++++++++++++++++++++++++++++--
 xen/include/public/memory.h |   64 +++++++++++++++++++++++++++++++++++++
 2 files changed, 134 insertions(+), 3 deletions(-)

diff --git a/xen/common/memory.c b/xen/common/memory.c
index 50b740f..3e1db2a 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -279,7 +279,7 @@ static void decrease_reservation(struct memop_args *a)
     a->nr_done = i;
 }
 
-static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
+static long memory_exchange(int op, XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
 {
     struct xen_memory_exchange exch;
     PAGE_LIST_HEAD(in_chunk_list);
@@ -496,7 +496,7 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
             mfn = page_to_mfn(page);
             guest_physmap_add_page(d, gpfn, mfn, exch.out.extent_order);
 
-            if ( !paging_mode_translate(d) )
+            if ( op == XENMEM_get_dma_buf || !paging_mode_translate(d) )
             {
                 for ( k = 0; k < (1UL << exch.out.extent_order); k++ )
                     set_gpfn_from_mfn(mfn + k, gpfn + k);
@@ -505,6 +505,18 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
                                             &mfn, 1) )
                     rc = -EFAULT;
             }
+
+            if ( op == XENMEM_get_dma_buf )
+            {
+                static int warning;
+                if ( guest_physmap_pin_range(d, gpfn, exch.out.extent_order) )
+                {
+                    if ( !warning ) {
+                        gdprintk(XENLOG_WARNING, "guest_physmap_pin_range not implemented\n");
+                        warning = 1;
+                    }
+                }
+            }
         }
         BUG_ON( !(d->is_dying) && (j != (1UL << out_chunk_order)) );
     }
@@ -542,6 +554,54 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
     return rc;
 }
 
+static long put_dma_buf(XEN_GUEST_HANDLE_PARAM(xen_put_dma_buf_t) arg)
+{
+    int rc, i;
+    struct xen_put_dma_buf put_dma_buf;
+    xen_pfn_t gpfn;
+    struct domain *d;
+
+    if ( copy_from_guest(&put_dma_buf, arg, 1) )
+        return -EFAULT;
+
+    /* Various sanity checks. */
+    if ( /* Extent order is sensible? */
+         (put_dma_buf.in.extent_order > MAX_ORDER) ||
+         /* Size of input list does not overflow a long? */
+         ((~0UL >> put_dma_buf.in.extent_order) < put_dma_buf.in.nr_extents) )
+        return -EINVAL;
+
+    if ( !guest_handle_okay(put_dma_buf.in.extent_start, put_dma_buf.in.nr_extents) )
+        return -EFAULT;
+
+    d = rcu_lock_domain_by_any_id(put_dma_buf.in.domid);
+    if ( d == NULL )
+    {
+        /* No domain reference taken yet, so don't go through 'fail'. */
+        return -ESRCH;
+    }
+
+    for ( i = 0; i < put_dma_buf.in.nr_extents; i++ )
+    {
+        if ( unlikely(__copy_from_guest_offset(
+                      &gpfn, put_dma_buf.in.extent_start, i, 1)) )
+        {
+            rc = -EFAULT;
+            goto fail;
+        }
+
+        rc = guest_physmap_unpin_range(d, gpfn, put_dma_buf.in.extent_order);
+        if ( rc )
+            goto fail;
+    }
+
+    rc = 0;
+
+ fail:
+    rcu_unlock_domain(d);
+    return rc;
+}
+
 long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
 {
     struct domain *d;
@@ -630,8 +690,15 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
 
         break;
 
+    case XENMEM_get_dma_buf:
+        /* xen_get_dma_buf_t is identical to xen_memory_exchange_t, so
+         * just cast it and reuse memory_exchange */
     case XENMEM_exchange:
-        rc = memory_exchange(guest_handle_cast(arg, xen_memory_exchange_t));
+        rc = memory_exchange(op, guest_handle_cast(arg, xen_memory_exchange_t));
+        break;
+
+    case XENMEM_put_dma_buf:
+        rc = put_dma_buf(guest_handle_cast(arg, xen_put_dma_buf_t));
         break;
 
     case XENMEM_maximum_ram_page:
diff --git a/xen/include/public/memory.h b/xen/include/public/memory.h
index 7a26dee..354b117 100644
--- a/xen/include/public/memory.h
+++ b/xen/include/public/memory.h
@@ -459,6 +459,70 @@ DEFINE_XEN_GUEST_HANDLE(xen_mem_sharing_op_t);
  * The zero value is appropiate.
  */
 
+#define XENMEM_get_dma_buf             26
+/*
+ * This hypercall is similar to XENMEM_exchange: it exchanges the pages
+ * passed in for a new set of pages, contiguous and under 4G if so
+ * requested. The new pages are "pinned": it is guaranteed that their
+ * p2m mappings will not change until they are explicitly "unpinned".
+ * If the return code is zero then @out.extent_start provides the MFNs
+ * of the newly-allocated memory.  Returns zero on complete success,
+ * otherwise a negative error code.
+ * On complete success @nr_exchanged == @in.nr_extents.  On partial
+ * success @nr_exchanged indicates how much work was done.
+ */
+struct xen_get_dma_buf {
+    /*
+     * [IN] Details of memory extents to be exchanged (GMFN bases).
+     * Note that @in.address_bits is ignored and unused.
+     */
+    struct xen_memory_reservation in;
+
+    /*
+     * [IN/OUT] Details of new memory extents.
+     * We require that:
+     *  1. @in.domid == @out.domid
+     *  2. @in.nr_extents << @in.extent_order ==
+     *     @out.nr_extents << @out.extent_order
+     *  3. @in.extent_start and @out.extent_start lists must not overlap
+     *  4. @out.extent_start lists GPFN bases to be populated
+     *  5. @out.extent_start is overwritten with allocated GMFN bases
+     */
+    struct xen_memory_reservation out;
+
+    /*
+     * [OUT] Number of input extents that were successfully exchanged:
+     *  1. The first @nr_exchanged input extents were successfully
+     *     deallocated.
+     *  2. The corresponding first entries in the output extent list correctly
+     *     indicate the GMFNs that were successfully exchanged.
+     *  3. All other input and output extents are untouched.
+     *  4. If not all input extents are exchanged then the return code of this
+     *     command will be non-zero.
+     *  5. THIS FIELD MUST BE INITIALISED TO ZERO BY THE CALLER!
+     */
+    xen_ulong_t nr_exchanged;
+};
+typedef struct xen_get_dma_buf xen_get_dma_buf_t;
+DEFINE_XEN_GUEST_HANDLE(xen_get_dma_buf_t);
+
+#define XENMEM_put_dma_buf             27
+/*
+ * XENMEM_put_dma_buf unpins a set of pages previously pinned by
+ * XENMEM_get_dma_buf. After this call the p2m mappings of the pages
+ * can once again be changed transparently by the hypervisor, as
+ * usual. The pages remain accessible from the guest.
+ */
+struct xen_put_dma_buf {
+    /*
+     * [IN] Details of memory extents to be unpinned (GMFN bases).
+     * Note that @in.address_bits is ignored and unused.
+     */
+    struct xen_memory_reservation in;
+};
+typedef struct xen_put_dma_buf xen_put_dma_buf_t;
+DEFINE_XEN_GUEST_HANDLE(xen_put_dma_buf_t);
+
 #endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
 
 #endif /* __XEN_PUBLIC_MEMORY_H__ */
-- 
1.7.2.5

