[Xen-devel] [PATCH 3 of 3] Tools: Sanitize mem_event/access/paging interfaces
tools/libxc/xc_mem_access.c | 10 +++++-
tools/libxc/xc_mem_event.c | 16 +++++++---
tools/libxc/xc_mem_paging.c | 10 +++++-
tools/libxc/xenctrl.h | 7 ++-
tools/tests/xen-access/xen-access.c | 56 +++++-------------------------------
tools/xenpaging/xenpaging.c | 33 ++++++---------------
tools/xenpaging/xenpaging.h | 2 +-
xen/arch/x86/mm/mem_event.c | 33 +-------------------
xen/include/public/domctl.h | 4 +-
xen/include/public/mem_event.h | 4 --
xen/include/xen/sched.h | 2 -
11 files changed, 56 insertions(+), 121 deletions(-)
Don't use the superfluous shared page; return the event channel port directly
as part of the domctl struct instead.
Use hypercall buffers to manage the ring page.
In-tree consumers (xenpaging, xen-access) updated. This is an ABI/API change,
so please voice any concerns.
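To illustrate the new calling convention, here is a minimal consumer-side
sketch (the helper name enable_access_ring is hypothetical; it condenses the
xen-access changes below, with error handling abbreviated):

    /* Sketch: enable mem_access with the reworked API. The ring page is a
     * hypercall buffer rather than a posix_memalign()/mlock()ed allocation,
     * and the event channel port comes back through the domctl. */
    #include <xenctrl.h>

    static void *enable_access_ring(xc_interface *xch, domid_t domain_id,
                                    uint32_t *port)
    {
        DECLARE_HYPERCALL_BUFFER(void, ring_page);
        void *ring;

        ring = xc_hypercall_buffer_alloc_pages(xch, ring_page, 1);
        if ( ring == NULL )
            return NULL;

        if ( xc_mem_access_enable(xch, domain_id, port,
                                  HYPERCALL_BUFFER(ring_page)) != 0 )
        {
            xc_hypercall_buffer_free_pages(xch, ring_page, 1);
            return NULL;
        }

        return ring;
    }

The port returned through *port is then bound with
xc_evtchn_bind_interdomain() exactly as before, just without dereferencing a
shared page.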
Known pending issues:
- the pager could die and its ring page could be reused by some other process,
yet Xen would retain its mapping of the page.
- use a saner interface for the paging_load buffer.
Signed-off-by: Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
diff -r 26217ae46e45 -r 6325dd552671 tools/libxc/xc_mem_access.c
--- a/tools/libxc/xc_mem_access.c
+++ b/tools/libxc/xc_mem_access.c
@@ -25,12 +25,18 @@
int xc_mem_access_enable(xc_interface *xch, domid_t domain_id,
- void *shared_page, void *ring_page)
+ uint32_t *port, xc_hypercall_buffer_t *ring_page)
{
+ if ( !port )
+ {
+ errno = EINVAL;
+ return -1;
+ }
+
return xc_mem_event_control(xch, domain_id,
XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE,
XEN_DOMCTL_MEM_EVENT_OP_ACCESS,
- shared_page, ring_page);
+ port, ring_page);
}
int xc_mem_access_disable(xc_interface *xch, domid_t domain_id)
diff -r 26217ae46e45 -r 6325dd552671 tools/libxc/xc_mem_event.c
--- a/tools/libxc/xc_mem_event.c
+++ b/tools/libxc/xc_mem_event.c
@@ -24,19 +24,25 @@
#include "xc_private.h"
int xc_mem_event_control(xc_interface *xch, domid_t domain_id, unsigned int op,
- unsigned int mode, void *page, void *ring_page)
+ unsigned int mode, uint32_t *port,
+ xc_hypercall_buffer_t *ring_page)
{
DECLARE_DOMCTL;
+ DECLARE_HYPERCALL_BUFFER_ARGUMENT(ring_page);
+ int rc;
domctl.cmd = XEN_DOMCTL_mem_event_op;
domctl.domain = domain_id;
domctl.u.mem_event_op.op = op;
domctl.u.mem_event_op.mode = mode;
-
- domctl.u.mem_event_op.shared_addr = (unsigned long)page;
- domctl.u.mem_event_op.ring_addr = (unsigned long)ring_page;
+ domctl.u.mem_event_op.ring_addr = (ring_page) ?
+ (unsigned long long) HYPERCALL_BUFFER_AS_ARG(ring_page) : 0;
- return do_domctl(xch, &domctl);
+ errno = 0;
+ rc = do_domctl(xch, &domctl);
+ if ( !rc && port )
+ *port = domctl.u.mem_event_op.port;
+ return rc;
}
int xc_mem_event_memop(xc_interface *xch, domid_t domain_id,
diff -r 26217ae46e45 -r 6325dd552671 tools/libxc/xc_mem_paging.c
--- a/tools/libxc/xc_mem_paging.c
+++ b/tools/libxc/xc_mem_paging.c
@@ -25,12 +25,18 @@
int xc_mem_paging_enable(xc_interface *xch, domid_t domain_id,
- void *shared_page, void *ring_page)
+ uint32_t *port, xc_hypercall_buffer_t *ring_page)
{
+ if ( !port )
+ {
+ errno = EINVAL;
+ return -1;
+ }
+
return xc_mem_event_control(xch, domain_id,
XEN_DOMCTL_MEM_EVENT_OP_PAGING_ENABLE,
XEN_DOMCTL_MEM_EVENT_OP_PAGING,
- shared_page, ring_page);
+ port, ring_page);
}
int xc_mem_paging_disable(xc_interface *xch, domid_t domain_id)
diff -r 26217ae46e45 -r 6325dd552671 tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h
+++ b/tools/libxc/xenctrl.h
@@ -1888,13 +1888,14 @@ int xc_tmem_restore_extra(xc_interface *
* mem_event operations
*/
int xc_mem_event_control(xc_interface *xch, domid_t domain_id, unsigned int op,
- unsigned int mode, void *shared_page, void *ring_page);
+ unsigned int mode, uint32_t *port,
+ xc_hypercall_buffer_t *ring_page);
int xc_mem_event_memop(xc_interface *xch, domid_t domain_id,
unsigned int op, unsigned int mode,
uint64_t gfn, void *buffer);
int xc_mem_paging_enable(xc_interface *xch, domid_t domain_id,
- void *shared_page, void *ring_page);
+ uint32_t *port, xc_hypercall_buffer_t *ring_page);
int xc_mem_paging_disable(xc_interface *xch, domid_t domain_id);
int xc_mem_paging_nominate(xc_interface *xch, domid_t domain_id,
unsigned long gfn);
@@ -1906,7 +1907,7 @@ int xc_mem_paging_resume(xc_interface *x
unsigned long gfn);
int xc_mem_access_enable(xc_interface *xch, domid_t domain_id,
- void *shared_page, void *ring_page);
+ uint32_t *port, xc_hypercall_buffer_t *ring_page);
int xc_mem_access_disable(xc_interface *xch, domid_t domain_id);
int xc_mem_access_resume(xc_interface *xch, domid_t domain_id,
unsigned long gfn);
diff -r 26217ae46e45 -r 6325dd552671 tools/tests/xen-access/xen-access.c
--- a/tools/tests/xen-access/xen-access.c
+++ b/tools/tests/xen-access/xen-access.c
@@ -98,7 +98,7 @@ typedef struct mem_event {
xc_evtchn *xce_handle;
int port;
mem_event_back_ring_t back_ring;
- mem_event_shared_page_t *shared_page;
+ uint32_t evtchn_port;
void *ring_page;
spinlock_t ring_lock;
} mem_event_t;
@@ -167,36 +167,14 @@ int xc_wait_for_event_or_timeout(xc_inte
return -errno;
}
-static void *init_page(void)
-{
- void *buffer;
- int ret;
-
- /* Allocated page memory */
- ret = posix_memalign(&buffer, PAGE_SIZE, PAGE_SIZE);
- if ( ret != 0 )
- goto out_alloc;
-
- /* Lock buffer in memory so it can't be paged out */
- ret = mlock(buffer, PAGE_SIZE);
- if ( ret != 0 )
- goto out_lock;
-
- return buffer;
-
- munlock(buffer, PAGE_SIZE);
- out_lock:
- free(buffer);
- out_alloc:
- return NULL;
-}
-
xenaccess_t *xenaccess_init(xc_interface **xch_r, domid_t domain_id)
{
xenaccess_t *xenaccess;
xc_interface *xch;
int rc;
+ DECLARE_HYPERCALL_BUFFER(void, ring_page);
+ (void)ring_page;
xch = xc_interface_open(NULL, NULL, 0);
if ( !xch )
goto err_iface;
@@ -214,16 +192,9 @@ xenaccess_t *xenaccess_init(xc_interface
/* Set domain id */
xenaccess->mem_event.domain_id = domain_id;
- /* Initialise shared page */
- xenaccess->mem_event.shared_page = init_page();
- if ( xenaccess->mem_event.shared_page == NULL )
- {
- ERROR("Error initialising shared page");
- goto err;
- }
-
/* Initialise ring page */
- xenaccess->mem_event.ring_page = init_page();
+ xenaccess->mem_event.ring_page =
+ xc_hypercall_buffer_alloc_pages(xenaccess->xc_handle, ring_page, 1);
if ( xenaccess->mem_event.ring_page == NULL )
{
ERROR("Error initialising ring page");
@@ -242,8 +213,8 @@ xenaccess_t *xenaccess_init(xc_interface
/* Initialise Xen */
rc = xc_mem_access_enable(xenaccess->xc_handle,
xenaccess->mem_event.domain_id,
- xenaccess->mem_event.shared_page,
- xenaccess->mem_event.ring_page);
+ &xenaccess->mem_event.evtchn_port,
+ HYPERCALL_BUFFER(ring_page));
if ( rc != 0 )
{
switch ( errno ) {
@@ -271,7 +242,7 @@ xenaccess_t *xenaccess_init(xc_interface
/* Bind event notification */
rc = xc_evtchn_bind_interdomain(xenaccess->mem_event.xce_handle,
xenaccess->mem_event.domain_id,
- xenaccess->mem_event.shared_page->port);
+ xenaccess->mem_event.evtchn_port);
if ( rc < 0 )
{
ERROR("Failed to bind event channel");
@@ -322,17 +293,8 @@ xenaccess_t *xenaccess_init(xc_interface
err:
if ( xenaccess )
{
- if ( xenaccess->mem_event.shared_page )
- {
- munlock(xenaccess->mem_event.shared_page, PAGE_SIZE);
- free(xenaccess->mem_event.shared_page);
- }
-
if ( xenaccess->mem_event.ring_page )
- {
- munlock(xenaccess->mem_event.ring_page, PAGE_SIZE);
- free(xenaccess->mem_event.ring_page);
- }
+ xc_hypercall_buffer_free_pages(xch, ring_page, 1);
free(xenaccess->platform_info);
free(xenaccess->domain_info);
diff -r 26217ae46e45 -r 6325dd552671 tools/xenpaging/xenpaging.c
--- a/tools/xenpaging/xenpaging.c
+++ b/tools/xenpaging/xenpaging.c
@@ -285,6 +285,7 @@ static struct xenpaging *xenpaging_init(
xentoollog_logger *dbg = NULL;
char *p;
int rc;
+ DECLARE_HYPERCALL_BUFFER(void, ring_page);
/* Allocate memory */
paging = calloc(1, sizeof(struct xenpaging));
@@ -341,16 +342,9 @@ static struct xenpaging *xenpaging_init(
goto err;
}
- /* Initialise shared page */
- paging->mem_event.shared_page = init_page();
- if ( paging->mem_event.shared_page == NULL )
- {
- PERROR("Error initialising shared page");
- goto err;
- }
-
/* Initialise ring page */
- paging->mem_event.ring_page = init_page();
+ paging->mem_event.ring_page =
+ xc_hypercall_buffer_alloc_pages(paging->xc_handle, ring_page, 1);
if ( paging->mem_event.ring_page == NULL )
{
PERROR("Error initialising ring page");
@@ -365,8 +359,8 @@ static struct xenpaging *xenpaging_init(
/* Initialise Xen */
rc = xc_mem_paging_enable(xch, paging->mem_event.domain_id,
- paging->mem_event.shared_page,
- paging->mem_event.ring_page);
+ &paging->mem_event.evtchn_port,
+ HYPERCALL_BUFFER(ring_page));
if ( rc != 0 )
{
switch ( errno ) {
@@ -397,7 +391,7 @@ static struct xenpaging *xenpaging_init(
/* Bind event notification */
rc = xc_evtchn_bind_interdomain(paging->mem_event.xce_handle,
paging->mem_event.domain_id,
- paging->mem_event.shared_page->port);
+ paging->mem_event.evtchn_port);
if ( rc < 0 )
{
PERROR("Failed to bind event channel");
@@ -452,19 +446,12 @@ static struct xenpaging *xenpaging_init(
{
if ( paging->xs_handle )
xs_close(paging->xs_handle);
+
+ if ( paging->mem_event.ring_page )
+ xc_hypercall_buffer_free_pages(xch, ring_page, 1);
+
if ( xch )
xc_interface_close(xch);
- if ( paging->mem_event.shared_page )
- {
- munlock(paging->mem_event.shared_page, PAGE_SIZE);
- free(paging->mem_event.shared_page);
- }
-
- if ( paging->mem_event.ring_page )
- {
- munlock(paging->mem_event.ring_page, PAGE_SIZE);
- free(paging->mem_event.ring_page);
- }
free(dom_path);
free(watch_target_tot_pages);
diff -r 26217ae46e45 -r 6325dd552671 tools/xenpaging/xenpaging.h
--- a/tools/xenpaging/xenpaging.h
+++ b/tools/xenpaging/xenpaging.h
@@ -36,7 +36,7 @@ struct mem_event {
xc_evtchn *xce_handle;
int port;
mem_event_back_ring_t back_ring;
- mem_event_shared_page_t *shared_page;
+ uint32_t evtchn_port;
void *ring_page;
};
diff -r 26217ae46e45 -r 6325dd552671 xen/arch/x86/mm/mem_event.c
--- a/xen/arch/x86/mm/mem_event.c
+++ b/xen/arch/x86/mm/mem_event.c
@@ -50,12 +50,10 @@ static int mem_event_enable(
struct domain *dom_mem_event = current->domain;
struct vcpu *v = current;
unsigned long ring_addr = mec->ring_addr;
- unsigned long shared_addr = mec->shared_addr;
l1_pgentry_t l1e;
- unsigned long shared_gfn = 0, ring_gfn = 0; /* gcc ... */
+ unsigned long ring_gfn = 0; /* gcc ... */
p2m_type_t p2mt;
mfn_t ring_mfn;
- mfn_t shared_mfn;
/* Only one helper at a time. If the helper crashed,
* the ring is in an undefined state and so is the guest.
@@ -66,10 +64,6 @@ static int mem_event_enable(
/* Get MFN of ring page */
guest_get_eff_l1e(v, ring_addr, &l1e);
ring_gfn = l1e_get_pfn(l1e);
- /* We're grabbing these two in an order that could deadlock
- * dom0 if 1. it were an hvm 2. there were two concurrent
- * enables 3. the two gfn's in each enable criss-crossed
- * 2MB regions. Duly noted.... */
ring_mfn = get_gfn(dom_mem_event, ring_gfn, &p2mt);
if ( unlikely(!mfn_valid(mfn_x(ring_mfn))) )
@@ -80,23 +74,9 @@ static int mem_event_enable(
mem_event_ring_lock_init(med);
- /* Get MFN of shared page */
- guest_get_eff_l1e(v, shared_addr, &l1e);
- shared_gfn = l1e_get_pfn(l1e);
- shared_mfn = get_gfn(dom_mem_event, shared_gfn, &p2mt);
-
- if ( unlikely(!mfn_valid(mfn_x(shared_mfn))) )
- {
- put_gfn(dom_mem_event, ring_gfn);
- put_gfn(dom_mem_event, shared_gfn);
- return -EINVAL;
- }
-
- /* Map ring and shared pages */
+ /* Map ring page */
med->ring_page = map_domain_page(mfn_x(ring_mfn));
- med->shared_page = map_domain_page(mfn_x(shared_mfn));
put_gfn(dom_mem_event, ring_gfn);
- put_gfn(dom_mem_event, shared_gfn);
/* Set the number of currently blocked vCPUs to 0. */
med->blocked = 0;
@@ -108,8 +88,7 @@ static int mem_event_enable(
if ( rc < 0 )
goto err;
- ((mem_event_shared_page_t *)med->shared_page)->port = rc;
- med->xen_port = rc;
+ med->xen_port = mec->port = rc;
/* Prepare ring buffer */
FRONT_RING_INIT(&med->front_ring,
@@ -125,9 +104,6 @@ static int mem_event_enable(
return 0;
err:
- unmap_domain_page(med->shared_page);
- med->shared_page = NULL;
-
unmap_domain_page(med->ring_page);
med->ring_page = NULL;
@@ -246,9 +222,6 @@ static int mem_event_disable(struct doma
unmap_domain_page(med->ring_page);
med->ring_page = NULL;
- unmap_domain_page(med->shared_page);
- med->shared_page = NULL;
-
/* Unblock all vCPUs */
for_each_vcpu ( d, v )
{
diff -r 26217ae46e45 -r 6325dd552671 xen/include/public/domctl.h
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -746,8 +746,8 @@ struct xen_domctl_mem_event_op {
uint32_t op; /* XEN_DOMCTL_MEM_EVENT_OP_*_* */
uint32_t mode; /* XEN_DOMCTL_MEM_EVENT_OP_* */
- uint64_aligned_t shared_addr; /* IN: Virtual address of shared page */
- uint64_aligned_t ring_addr; /* IN: Virtual address of ring page */
+ uint32_t port; /* OUT: event channel for ring */
+ uint64_aligned_t ring_addr; /* IN: Virtual address of ring page */
};
typedef struct xen_domctl_mem_event_op xen_domctl_mem_event_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_mem_event_op_t);
diff -r 26217ae46e45 -r 6325dd552671 xen/include/public/mem_event.h
--- a/xen/include/public/mem_event.h
+++ b/xen/include/public/mem_event.h
@@ -51,10 +51,6 @@
#define MEM_EVENT_REASON_INT3 5 /* int3 was hit: gla/gfn are RIP */
#define MEM_EVENT_REASON_SINGLESTEP 6 /* single step was invoked: gla/gfn are RIP */
-typedef struct mem_event_shared_page {
- uint32_t port;
-} mem_event_shared_page_t;
-
typedef struct mem_event_st {
uint16_t type;
uint16_t flags;
diff -r 26217ae46e45 -r 6325dd552671 xen/include/xen/sched.h
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -190,8 +190,6 @@ struct mem_event_domain
/* The ring has 64 entries */
unsigned char foreign_producers;
unsigned char target_producers;
- /* shared page */
- mem_event_shared_page_t *shared_page;
/* shared ring page */
void *ring_page;
/* front-end ring */
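For completeness, a condensed sketch of the resulting consumer setup flow
(hypothetical helper names, building on enable_access_ring() above; ring
initialisation itself is untouched by this patch):

    #include <xenctrl.h>
    #include <xen/mem_event.h>

    static int setup_ring_and_channel(xc_interface *xch, domid_t domain_id,
                                      mem_event_back_ring_t *back_ring,
                                      xc_evtchn **xce, int *local_port)
    {
        uint32_t remote_port;
        void *ring = enable_access_ring(xch, domain_id, &remote_port);

        if ( ring == NULL )
            return -1;

        /* Open the event channel interface and bind the port returned
         * through the domctl. */
        *xce = xc_evtchn_open(NULL, 0);
        if ( *xce == NULL )
            return -1;

        *local_port = xc_evtchn_bind_interdomain(*xce, domain_id,
                                                 remote_port);
        if ( *local_port < 0 )
            return -1;

        /* Back ring initialisation is unchanged. */
        SHARED_RING_INIT((mem_event_sring_t *)ring);
        BACK_RING_INIT(back_ring, (mem_event_sring_t *)ring, XC_PAGE_SIZE);

        return 0;
    }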