|
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index] [Xen-devel] [PATCH RFC 3/3] xen: introduce XENMEM_get_dma_buf and XENMEM_put_dma_buf
Introduce two new hypercalls XENMEM_get_dma_buf and XENMEM_put_dma_buf.
XENMEM_get_dma_buf exchanges a set of pages for a new set, contiguous and
under 4G if so requested. The new pages are going to be "pinned" so that
their p2m mapping won't be changed until XENMEM_put_dma_buf is called.
XENMEM_get_dma_buf returns the MFNs of the new pages to the caller.
The only effect of XENMEM_put_dma_buf is to "unpin" the previously
pinned pages. Afterwards the p2m mappings can be transparently changed by
the hypervisor as normal. The memory remains accessible from the guest.
XENMEM_put_dma_buf is unimplemented.
Signed-off-by: Stefano Stabellini <stefano.stabellini@xxxxxxxxxxxxx>
---
xen/common/memory.c | 20 +++++++++++--
xen/include/public/memory.h | 64 +++++++++++++++++++++++++++++++++++++++++++
2 files changed, 81 insertions(+), 3 deletions(-)
diff --git a/xen/common/memory.c b/xen/common/memory.c
index 50b740f..2c629c6 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -279,7 +279,7 @@ static void decrease_reservation(struct memop_args *a)
a->nr_done = i;
}
-static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
+static long memory_exchange(int op,
XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
{
struct xen_memory_exchange exch;
PAGE_LIST_HEAD(in_chunk_list);
@@ -496,7 +496,7 @@ static long
memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
mfn = page_to_mfn(page);
guest_physmap_add_page(d, gpfn, mfn, exch.out.extent_order);
- if ( !paging_mode_translate(d) )
+ if ( op == XENMEM_get_dma_buf || !paging_mode_translate(d) )
{
for ( k = 0; k < (1UL << exch.out.extent_order); k++ )
set_gpfn_from_mfn(mfn + k, gpfn + k);
@@ -505,6 +505,17 @@ static long
memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
&mfn, 1) )
rc = -EFAULT;
}
+
+ if ( op == XENMEM_get_dma_buf )
+ {
+ static int warning;
+ rc = guest_physmap_pin_range(d, gpfn, exch.out.extent_order);
+ if ( rc )
+ {
+ gdprintk(XENLOG_WARNING, "guest_physmap_pin_range not
implemented\n");
+ warning = 1;
+ }
+ }
}
BUG_ON( !(d->is_dying) && (j != (1UL << out_chunk_order)) );
}
@@ -630,8 +641,11 @@ long do_memory_op(unsigned long cmd,
XEN_GUEST_HANDLE_PARAM(void) arg)
break;
+ case XENMEM_get_dma_buf:
+ /* xen_get_dma_buf_t is identical to xen_memory_exchange_t, so
+ * just cast it and reuse memory_exchange */
case XENMEM_exchange:
- rc = memory_exchange(guest_handle_cast(arg, xen_memory_exchange_t));
+ rc = memory_exchange(op, guest_handle_cast(arg,
xen_memory_exchange_t));
break;
case XENMEM_maximum_ram_page:
diff --git a/xen/include/public/memory.h b/xen/include/public/memory.h
index 7a26dee..354b117 100644
--- a/xen/include/public/memory.h
+++ b/xen/include/public/memory.h
@@ -459,6 +459,70 @@ DEFINE_XEN_GUEST_HANDLE(xen_mem_sharing_op_t);
* The zero value is appropiate.
*/
+#define XENMEM_get_dma_buf 26
+/*
+ * This hypercall is similar to XENMEM_exchange: it exchanges the pages
+ * passed in with a new set of pages, contiguous and under 4G if so
+ * requested. The new pages are going to be "pinned": it's guaranteed
+ * that their p2m mapping won't be changed until explicitly "unpinned".
+ * If return code is zero then @out.extent_list provides the MFNs of the
+ * newly-allocated memory. Returns zero on complete success, otherwise
+ * a negative error code.
+ * On complete success then always @nr_exchanged == @in.nr_extents. On
+ * partial success @nr_exchanged indicates how much work was done.
+ */
+struct xen_get_dma_buf {
+ /*
+ * [IN] Details of memory extents to be exchanged (GMFN bases).
+ * Note that @in.address_bits is ignored and unused.
+ */
+ struct xen_memory_reservation in;
+
+ /*
+ * [IN/OUT] Details of new memory extents.
+ * We require that:
+ * 1. @in.domid == @out.domid
+ * 2. @in.nr_extents << @in.extent_order ==
+ * @out.nr_extents << @out.extent_order
+ * 3. @in.extent_start and @out.extent_start lists must not overlap
+ * 4. @out.extent_start lists GPFN bases to be populated
+ * 5. @out.extent_start is overwritten with allocated GMFN bases
+ */
+ struct xen_memory_reservation out;
+
+ /*
+ * [OUT] Number of input extents that were successfully exchanged:
+ * 1. The first @nr_exchanged input extents were successfully
+ * deallocated.
+ * 2. The corresponding first entries in the output extent list correctly
+ * indicate the GMFNs that were successfully exchanged.
+ * 3. All other input and output extents are untouched.
+ * 4. If not all input extents are exchanged then the return code of this
+ * command will be non-zero.
+ * 5. THIS FIELD MUST BE INITIALISED TO ZERO BY THE CALLER!
+ */
+ xen_ulong_t nr_exchanged;
+};
+typedef struct xen_get_dma_buf xen_get_dma_buf_t;
+DEFINE_XEN_GUEST_HANDLE(xen_get_dma_buf_t);
+
+#define XENMEM_put_dma_buf 27
+/*
+ * XENMEM_put_dma_buf unpins a set of pages, previously pinned by
+ * XENMEM_get_dma_buf. After this call the p2m mapping of the pages can
+ * be transparently changed by the hypervisor, as usual. The pages are
+ * still accessible from the guest.
+ */
+struct xen_put_dma_buf {
+ /*
+ * [IN] Details of memory extents to be unpinned (GMFN bases).
+ * Note that @in.address_bits is ignored and unused.
+ */
+ struct xen_memory_reservation in;
+};
+typedef struct xen_put_dma_buf xen_put_dma_buf_t;
+DEFINE_XEN_GUEST_HANDLE(xen_put_dma_buf_t);
+
#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
#endif /* __XEN_PUBLIC_MEMORY_H__ */
--
1.7.2.5
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel
|
![]() |
Lists.xenproject.org is hosted with RackSpace, monitoring our |