
[Xen-devel] [PATCH 1 of 7] Tools: Sanitize mem_event/access/paging interfaces



 tools/libxc/xc_mem_access.c         |  10 ++++++++--
 tools/libxc/xc_mem_event.c          |  13 ++++++++-----
 tools/libxc/xc_mem_paging.c         |  10 ++++++++--
 tools/libxc/xenctrl.h               |   6 +++---
 tools/tests/xen-access/xen-access.c |  22 ++++------------------
 tools/xenpaging/xenpaging.c         |  18 +++---------------
 tools/xenpaging/xenpaging.h         |   2 +-
 xen/arch/x86/mm/mem_event.c         |  33 +++------------------------------
 xen/include/public/domctl.h         |   4 ++--
 xen/include/public/mem_event.h      |   4 ----
 xen/include/xen/sched.h             |   2 --
 11 files changed, 40 insertions(+), 84 deletions(-)


Don't use the superfluous shared page; instead, return the event channel
directly as part of the domctl struct.

In-tree consumers (xenpaging, xen-access) updated. This is an ABI/API change,
so please voice any concerns.

Known pending issues:
- The pager could die and its ring page could then be reused by some other
  process, yet Xen would still retain its mapping of the page.
- The paging_load buffer needs a saner interface.

This change also affects the x86/mm bits in the hypervisor that process the
mem_event setup domctl.
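
For reference, here is a minimal consumer-side sketch of the new calling
convention (mirroring what xen-access now does; the enable_and_bind helper
and its surrounding setup are illustrative only and not part of this patch):

#include <stdint.h>
#include <xenctrl.h>

/* Illustrative helper: enable mem_access for a domain and bind to the event
 * channel port that Xen now hands back through the domctl, instead of reading
 * it from a shared page.  ring_page is assumed to have been allocated and
 * mlock'ed by the caller, as in xen-access.c.  Returns the local port on
 * success, or a negative value on failure. */
static int enable_and_bind(xc_interface *xch, xc_evtchn *xce,
                           domid_t domain_id, void *ring_page)
{
    uint32_t evtchn_port;   /* OUT: filled from domctl.u.mem_event_op.port */
    int rc;

    rc = xc_mem_access_enable(xch, domain_id, &evtchn_port, ring_page);
    if ( rc != 0 )
        return rc;

    /* Previously this read shared_page->port. */
    return xc_evtchn_bind_interdomain(xce, domain_id, evtchn_port);
}

The only functional difference for consumers is where the event channel port
comes from; ring setup and event handling are otherwise unchanged.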

Signed-off-by: Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>

diff -r 8ddd13cc783e -r 3b24188d8815 tools/libxc/xc_mem_access.c
--- a/tools/libxc/xc_mem_access.c
+++ b/tools/libxc/xc_mem_access.c
@@ -25,12 +25,18 @@
 
 
 int xc_mem_access_enable(xc_interface *xch, domid_t domain_id,
-                        void *shared_page, void *ring_page)
+                         uint32_t *port, void *ring_page)
 {
+    if ( !port )
+    {
+        errno = EINVAL;
+        return -1;
+    }
+
     return xc_mem_event_control(xch, domain_id,
                                 XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE,
                                 XEN_DOMCTL_MEM_EVENT_OP_ACCESS,
-                                shared_page, ring_page);
+                                port, ring_page);
 }
 
 int xc_mem_access_disable(xc_interface *xch, domid_t domain_id)
diff -r 8ddd13cc783e -r 3b24188d8815 tools/libxc/xc_mem_event.c
--- a/tools/libxc/xc_mem_event.c
+++ b/tools/libxc/xc_mem_event.c
@@ -24,19 +24,22 @@
 #include "xc_private.h"
 
 int xc_mem_event_control(xc_interface *xch, domid_t domain_id, unsigned int op,
-                         unsigned int mode, void *page, void *ring_page)
+                         unsigned int mode, uint32_t *port, void *ring_page)
 {
     DECLARE_DOMCTL;
+    int rc;
 
     domctl.cmd = XEN_DOMCTL_mem_event_op;
     domctl.domain = domain_id;
     domctl.u.mem_event_op.op = op;
     domctl.u.mem_event_op.mode = mode;
-
-    domctl.u.mem_event_op.shared_addr = (unsigned long)page;
-    domctl.u.mem_event_op.ring_addr = (unsigned long)ring_page;
+    domctl.u.mem_event_op.ring_addr = (unsigned long) ring_page;
     
-    return do_domctl(xch, &domctl);
+    errno = 0;
+    rc = do_domctl(xch, &domctl);
+    if ( !rc && port )
+        *port = domctl.u.mem_event_op.port;
+    return rc;
 }
 
 int xc_mem_event_memop(xc_interface *xch, domid_t domain_id, 
diff -r 8ddd13cc783e -r 3b24188d8815 tools/libxc/xc_mem_paging.c
--- a/tools/libxc/xc_mem_paging.c
+++ b/tools/libxc/xc_mem_paging.c
@@ -25,12 +25,18 @@
 
 
 int xc_mem_paging_enable(xc_interface *xch, domid_t domain_id,
-                        void *shared_page, void *ring_page)
+                         uint32_t *port, void *ring_page)
 {
+    if ( !port )
+    {
+        errno = EINVAL;
+        return -1;
+    }
+
     return xc_mem_event_control(xch, domain_id,
                                 XEN_DOMCTL_MEM_EVENT_OP_PAGING_ENABLE,
                                 XEN_DOMCTL_MEM_EVENT_OP_PAGING,
-                                shared_page, ring_page);
+                                port, ring_page);
 }
 
 int xc_mem_paging_disable(xc_interface *xch, domid_t domain_id)
diff -r 8ddd13cc783e -r 3b24188d8815 tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h
+++ b/tools/libxc/xenctrl.h
@@ -1888,13 +1888,13 @@ int xc_tmem_restore_extra(xc_interface *
  * mem_event operations
  */
 int xc_mem_event_control(xc_interface *xch, domid_t domain_id, unsigned int op,
-                         unsigned int mode, void *shared_page, void *ring_page);
+                         unsigned int mode, uint32_t *port, void *ring_page);
 int xc_mem_event_memop(xc_interface *xch, domid_t domain_id, 
                         unsigned int op, unsigned int mode,
                         uint64_t gfn, void *buffer);
 
 int xc_mem_paging_enable(xc_interface *xch, domid_t domain_id,
-                        void *shared_page, void *ring_page);
+                         uint32_t *port, void *ring_page);
 int xc_mem_paging_disable(xc_interface *xch, domid_t domain_id);
 int xc_mem_paging_nominate(xc_interface *xch, domid_t domain_id,
                            unsigned long gfn);
@@ -1906,7 +1906,7 @@ int xc_mem_paging_resume(xc_interface *x
                          unsigned long gfn);
 
 int xc_mem_access_enable(xc_interface *xch, domid_t domain_id,
-                        void *shared_page, void *ring_page);
+                         uint32_t *port, void *ring_page);
 int xc_mem_access_disable(xc_interface *xch, domid_t domain_id);
 int xc_mem_access_resume(xc_interface *xch, domid_t domain_id,
                          unsigned long gfn);
diff -r 8ddd13cc783e -r 3b24188d8815 tools/tests/xen-access/xen-access.c
--- a/tools/tests/xen-access/xen-access.c
+++ b/tools/tests/xen-access/xen-access.c
@@ -98,7 +98,7 @@ typedef struct mem_event {
     xc_evtchn *xce_handle;
     int port;
     mem_event_back_ring_t back_ring;
-    mem_event_shared_page_t *shared_page;
+    uint32_t evtchn_port;
     void *ring_page;
     spinlock_t ring_lock;
 } mem_event_t;
@@ -166,7 +166,7 @@ int xc_wait_for_event_or_timeout(xc_inte
  err:
     return -errno;
 }
-
+ 
 static void *init_page(void)
 {
     void *buffer;
@@ -214,14 +214,6 @@ xenaccess_t *xenaccess_init(xc_interface
     /* Set domain id */
     xenaccess->mem_event.domain_id = domain_id;
 
-    /* Initialise shared page */
-    xenaccess->mem_event.shared_page = init_page();
-    if ( xenaccess->mem_event.shared_page == NULL )
-    {
-        ERROR("Error initialising shared page");
-        goto err;
-    }
-
     /* Initialise ring page */
     xenaccess->mem_event.ring_page = init_page();
     if ( xenaccess->mem_event.ring_page == NULL )
@@ -242,7 +234,7 @@ xenaccess_t *xenaccess_init(xc_interface
 
     /* Initialise Xen */
     rc = xc_mem_access_enable(xenaccess->xc_handle, xenaccess->mem_event.domain_id,
-                             xenaccess->mem_event.shared_page,
+                             &xenaccess->mem_event.evtchn_port,
                              xenaccess->mem_event.ring_page);
     if ( rc != 0 )
     {
@@ -271,7 +263,7 @@ xenaccess_t *xenaccess_init(xc_interface
     /* Bind event notification */
     rc = xc_evtchn_bind_interdomain(xenaccess->mem_event.xce_handle,
                                     xenaccess->mem_event.domain_id,
-                                    xenaccess->mem_event.shared_page->port);
+                                    xenaccess->mem_event.evtchn_port);
     if ( rc < 0 )
     {
         ERROR("Failed to bind event channel");
@@ -322,12 +314,6 @@ xenaccess_t *xenaccess_init(xc_interface
  err:
     if ( xenaccess )
     {
-        if ( xenaccess->mem_event.shared_page )
-        {
-            munlock(xenaccess->mem_event.shared_page, PAGE_SIZE);
-            free(xenaccess->mem_event.shared_page);
-        }
-
         if ( xenaccess->mem_event.ring_page )
         {
             munlock(xenaccess->mem_event.ring_page, PAGE_SIZE);
diff -r 8ddd13cc783e -r 3b24188d8815 tools/xenpaging/xenpaging.c
--- a/tools/xenpaging/xenpaging.c
+++ b/tools/xenpaging/xenpaging.c
@@ -341,14 +341,6 @@ static struct xenpaging *xenpaging_init(
         goto err;
     }
 
-    /* Initialise shared page */
-    paging->mem_event.shared_page = init_page();
-    if ( paging->mem_event.shared_page == NULL )
-    {
-        PERROR("Error initialising shared page");
-        goto err;
-    }
-
     /* Initialise ring page */
     paging->mem_event.ring_page = init_page();
     if ( paging->mem_event.ring_page == NULL )
@@ -365,7 +357,7 @@ static struct xenpaging *xenpaging_init(
     
     /* Initialise Xen */
     rc = xc_mem_paging_enable(xch, paging->mem_event.domain_id,
-                             paging->mem_event.shared_page,
+                             &paging->mem_event.evtchn_port,
                              paging->mem_event.ring_page);
     if ( rc != 0 )
     {
@@ -397,7 +389,7 @@ static struct xenpaging *xenpaging_init(
     /* Bind event notification */
     rc = xc_evtchn_bind_interdomain(paging->mem_event.xce_handle,
                                     paging->mem_event.domain_id,
-                                    paging->mem_event.shared_page->port);
+                                    paging->mem_event.evtchn_port);
     if ( rc < 0 )
     {
         PERROR("Failed to bind event channel");
@@ -452,13 +444,9 @@ static struct xenpaging *xenpaging_init(
     {
         if ( paging->xs_handle )
             xs_close(paging->xs_handle);
+
         if ( xch )
             xc_interface_close(xch);
-        if ( paging->mem_event.shared_page )
-        {
-            munlock(paging->mem_event.shared_page, PAGE_SIZE);
-            free(paging->mem_event.shared_page);
-        }
 
         if ( paging->mem_event.ring_page )
         {
diff -r 8ddd13cc783e -r 3b24188d8815 tools/xenpaging/xenpaging.h
--- a/tools/xenpaging/xenpaging.h
+++ b/tools/xenpaging/xenpaging.h
@@ -36,7 +36,7 @@ struct mem_event {
     xc_evtchn *xce_handle;
     int port;
     mem_event_back_ring_t back_ring;
-    mem_event_shared_page_t *shared_page;
+    uint32_t evtchn_port;
     void *ring_page;
 };
 
diff -r 8ddd13cc783e -r 3b24188d8815 xen/arch/x86/mm/mem_event.c
--- a/xen/arch/x86/mm/mem_event.c
+++ b/xen/arch/x86/mm/mem_event.c
@@ -50,12 +50,10 @@ static int mem_event_enable(
     struct domain *dom_mem_event = current->domain;
     struct vcpu *v = current;
     unsigned long ring_addr = mec->ring_addr;
-    unsigned long shared_addr = mec->shared_addr;
     l1_pgentry_t l1e;
-    unsigned long shared_gfn = 0, ring_gfn = 0; /* gcc ... */
+    unsigned long ring_gfn = 0; /* gcc ... */
     p2m_type_t p2mt;
     mfn_t ring_mfn;
-    mfn_t shared_mfn;
 
     /* Only one helper at a time. If the helper crashed,
      * the ring is in an undefined state and so is the guest.
@@ -66,10 +64,6 @@ static int mem_event_enable(
     /* Get MFN of ring page */
     guest_get_eff_l1e(v, ring_addr, &l1e);
     ring_gfn = l1e_get_pfn(l1e);
-    /* We're grabbing these two in an order that could deadlock
-     * dom0 if 1. it were an hvm 2. there were two concurrent
-     * enables 3. the two gfn's in each enable criss-crossed
-     * 2MB regions. Duly noted.... */
     ring_mfn = get_gfn(dom_mem_event, ring_gfn, &p2mt);
 
     if ( unlikely(!mfn_valid(mfn_x(ring_mfn))) )
@@ -80,23 +74,9 @@ static int mem_event_enable(
 
     mem_event_ring_lock_init(med);
 
-    /* Get MFN of shared page */
-    guest_get_eff_l1e(v, shared_addr, &l1e);
-    shared_gfn = l1e_get_pfn(l1e);
-    shared_mfn = get_gfn(dom_mem_event, shared_gfn, &p2mt);
-
-    if ( unlikely(!mfn_valid(mfn_x(shared_mfn))) )
-    {
-        put_gfn(dom_mem_event, ring_gfn);
-        put_gfn(dom_mem_event, shared_gfn);
-        return -EINVAL;
-    }
-
-    /* Map ring and shared pages */
+    /* Map ring page */
     med->ring_page = map_domain_page(mfn_x(ring_mfn));
-    med->shared_page = map_domain_page(mfn_x(shared_mfn));
     put_gfn(dom_mem_event, ring_gfn);
-    put_gfn(dom_mem_event, shared_gfn); 
 
     /* Set the number of currently blocked vCPUs to 0. */
     med->blocked = 0;
@@ -108,8 +88,7 @@ static int mem_event_enable(
     if ( rc < 0 )
         goto err;
 
-    ((mem_event_shared_page_t *)med->shared_page)->port = rc;
-    med->xen_port = rc;
+    med->xen_port = mec->port = rc;
 
     /* Prepare ring buffer */
     FRONT_RING_INIT(&med->front_ring,
@@ -125,9 +104,6 @@ static int mem_event_enable(
     return 0;
 
  err:
-    unmap_domain_page(med->shared_page);
-    med->shared_page = NULL;
-
     unmap_domain_page(med->ring_page);
     med->ring_page = NULL;
 
@@ -249,9 +225,6 @@ static int mem_event_disable(struct doma
         unmap_domain_page(med->ring_page);
         med->ring_page = NULL;
 
-        unmap_domain_page(med->shared_page);
-        med->shared_page = NULL;
-
         /* Unblock all vCPUs */
         for_each_vcpu ( d, v )
         {
diff -r 8ddd13cc783e -r 3b24188d8815 xen/include/public/domctl.h
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -746,8 +746,8 @@ struct xen_domctl_mem_event_op {
     uint32_t       op;           /* XEN_DOMCTL_MEM_EVENT_OP_*_* */
     uint32_t       mode;         /* XEN_DOMCTL_MEM_EVENT_OP_* */
 
-    uint64_aligned_t shared_addr;  /* IN:  Virtual address of shared page */
-    uint64_aligned_t ring_addr;    /* IN:  Virtual address of ring page */
+    uint32_t port;              /* OUT: event channel for ring */
+    uint64_aligned_t ring_addr; /* IN:  Virtual address of ring page */
 };
 typedef struct xen_domctl_mem_event_op xen_domctl_mem_event_op_t;
 DEFINE_XEN_GUEST_HANDLE(xen_domctl_mem_event_op_t);
diff -r 8ddd13cc783e -r 3b24188d8815 xen/include/public/mem_event.h
--- a/xen/include/public/mem_event.h
+++ b/xen/include/public/mem_event.h
@@ -51,10 +51,6 @@
 #define MEM_EVENT_REASON_INT3        5    /* int3 was hit: gla/gfn are RIP */
 #define MEM_EVENT_REASON_SINGLESTEP  6    /* single step was invoked: gla/gfn are RIP */
 
-typedef struct mem_event_shared_page {
-    uint32_t port;
-} mem_event_shared_page_t;
-
 typedef struct mem_event_st {
     uint16_t type;
     uint16_t flags;
diff -r 8ddd13cc783e -r 3b24188d8815 xen/include/xen/sched.h
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -190,8 +190,6 @@ struct mem_event_domain
     /* The ring has 64 entries */
     unsigned char foreign_producers;
     unsigned char target_producers;
-    /* shared page */
-    mem_event_shared_page_t *shared_page;
     /* shared ring page */
     void *ring_page;
     /* front-end ring */

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel