
[Xen-devel] [PATCH 3 of 8] Use a reserved pfn in the guest address space to store mem event rings



 tools/libxc/xc_domain_restore.c     |  42 ++++++++++++++++++
 tools/libxc/xc_domain_save.c        |  36 ++++++++++++++++
 tools/libxc/xc_hvm_build.c          |  21 ++++++--
 tools/libxc/xc_mem_access.c         |   6 +-
 tools/libxc/xc_mem_event.c          |   3 +-
 tools/libxc/xc_mem_paging.c         |   6 +-
 tools/libxc/xenctrl.h               |   8 +--
 tools/libxc/xg_save_restore.h       |   4 +
 tools/tests/xen-access/xen-access.c |  83 +++++++++++++++++-------------------
 tools/xenpaging/xenpaging.c         |  52 ++++++++++++++++------
 xen/arch/x86/mm/mem_event.c         |  50 ++++++++++------------
 xen/include/public/domctl.h         |   1 -
 xen/include/public/hvm/params.h     |   7 ++-
 xen/include/xen/sched.h             |   1 +
 14 files changed, 214 insertions(+), 106 deletions(-)


This solves a long-standing issue: the pages backing these rings used to be
pages belonging to dom0 user-space processes. Thus, if such a process died
unexpectedly, Xen would keep posting events to a page that by then belonged
to some other process.

We update all in-tree API consumers (xenpaging and xen-access).
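
For reference, the consumer-side setup sequence now looks like this; a
condensed sketch distilled from the xen-access and xenpaging hunks below
(error handling trimmed; xch, domain_id and back_ring assumed in scope;
the access ring is shown -- paging is identical modulo the param and the
enable call):

    unsigned long ring_pfn, mmap_pfn;
    uint32_t evtchn_port;
    void *ring_page;

    /* 1. Ask Xen which reserved gfn hosts the ring. */
    xc_get_hvm_param(xch, domain_id, HVM_PARAM_ACCESS_RING_PFN, &ring_pfn);

    /* 2. Map it; if the gfn is not populated yet, populate it and retry. */
    mmap_pfn = ring_pfn;
    ring_page = xc_map_foreign_batch(xch, domain_id, PROT_READ | PROT_WRITE,
                                     &mmap_pfn, 1);
    if ( mmap_pfn & XEN_DOMCTL_PFINFO_XTAB )
    {
        xc_domain_populate_physmap_exact(xch, domain_id, 1, 0, 0, &ring_pfn);
        mmap_pfn = ring_pfn;
        ring_page = xc_map_foreign_batch(xch, domain_id,
                                         PROT_READ | PROT_WRITE,
                                         &mmap_pfn, 1);
    }

    /* 3. Enable the interface. The ring address no longer travels through
     * the domctl; only the event channel port comes back. */
    xc_mem_access_enable(xch, domain_id, &evtchn_port);

    /* 4. Initialise the ring only after Xen has set up its end. */
    SHARED_RING_INIT((mem_event_sring_t *)ring_page);
    BACK_RING_INIT(&back_ring, (mem_event_sring_t *)ring_page, PAGE_SIZE);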

This is an API/ABI change, so please speak up if it breaks your assumptions.
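
Concretely, the domctl shrinks: the ring_addr field goes away, and Xen
instead reads the ring gfn from the HVM params (see the domctl.h hunk at
the bottom). Roughly (the op field is context not shown in that hunk):

    struct xen_domctl_mem_event_op {
        uint32_t op;           /* XEN_DOMCTL_MEM_EVENT_OP_*_* */
        uint32_t mode;         /* XEN_DOMCTL_MEM_EVENT_OP_* */
        uint32_t port;         /* OUT: event channel for ring */
        /* ring_addr (IN: virtual address of ring page) is gone */
    };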

The patch touches tools, hypervisor x86/hvm bits, and hypervisor x86/mm bits.
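
The rings live in the existing special-pfn range set up by xc_hvm_build.c.
With NR_SPECIAL_PAGES now 8 and special_pfn(x) = 0xff000 - 8 + x, the
reserved gfns work out to:

    special_pfn(SPECIALPAGE_PAGING)   == 0xfeff8   /* paging ring  */
    special_pfn(SPECIALPAGE_ACCESS)   == 0xfeff9   /* access ring  */
    special_pfn(SPECIALPAGE_SHARING)  == 0xfeffa   /* sharing ring */
    special_pfn(SPECIALPAGE_BUFIOREQ) == 0xfeffb
    special_pfn(SPECIALPAGE_XENSTORE) == 0xfeffc
    special_pfn(SPECIALPAGE_IOREQ)    == 0xfeffd
    special_pfn(SPECIALPAGE_IDENT_PT) == 0xfeffe
    special_pfn(SPECIALPAGE_CONSOLE)  == 0xfefff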

Signed-off-by: Andres Lagar-Cavilla <andres@xxxxxxxxxxxxxxxx>
Acked-by: Tim Deegan <tim@xxxxxxx>
Acked-by: Ian Campbell <ian.campbell@xxxxxxxxxx>

diff -r 2bc84a4a108c -r fc98c32a45c6 tools/libxc/xc_domain_restore.c
--- a/tools/libxc/xc_domain_restore.c
+++ b/tools/libxc/xc_domain_restore.c
@@ -677,6 +677,9 @@ typedef struct {
     int max_vcpu_id;
     uint64_t vcpumap;
     uint64_t identpt;
+    uint64_t paging_ring_pfn;
+    uint64_t access_ring_pfn;
+    uint64_t sharing_ring_pfn;
     uint64_t vm86_tss;
     uint64_t console_pfn;
     uint64_t acpi_ioport_location;
@@ -750,6 +753,39 @@ static int pagebuf_get_one(xc_interface 
         // DPRINTF("EPT identity map address: %llx\n", buf->identpt);
         return pagebuf_get_one(xch, ctx, buf, fd, dom);
 
+    case XC_SAVE_ID_HVM_PAGING_RING_PFN:
+        /* Skip padding 4 bytes then read the paging ring location. */
+        if ( RDEXACT(fd, &buf->paging_ring_pfn, sizeof(uint32_t)) ||
+             RDEXACT(fd, &buf->paging_ring_pfn, sizeof(uint64_t)) )
+        {
+            PERROR("error read the paging ring pfn");
+            return -1;
+        }
+        // DPRINTF("paging ring pfn address: %llx\n", buf->paging_ring_pfn);
+        return pagebuf_get_one(xch, ctx, buf, fd, dom);
+
+    case XC_SAVE_ID_HVM_ACCESS_RING_PFN:
+        /* Skip padding 4 bytes then read the mem access ring location. */
+        if ( RDEXACT(fd, &buf->access_ring_pfn, sizeof(uint32_t)) ||
+             RDEXACT(fd, &buf->access_ring_pfn, sizeof(uint64_t)) )
+        {
+            PERROR("error read the access ring pfn");
+            return -1;
+        }
+        // DPRINTF("access ring pfn address: %llx\n", buf->access_ring_pfn);
+        return pagebuf_get_one(xch, ctx, buf, fd, dom);
+
+    case XC_SAVE_ID_HVM_SHARING_RING_PFN:
+        /* Skip padding 4 bytes then read the sharing ring location. */
+        if ( RDEXACT(fd, &buf->sharing_ring_pfn, sizeof(uint32_t)) ||
+             RDEXACT(fd, &buf->sharing_ring_pfn, sizeof(uint64_t)) )
+        {
+            PERROR("error read the sharing ring pfn");
+            return -1;
+        }
+        // DPRINTF("sharing ring pfn address: %llx\n", buf->sharing_ring_pfn);
+        return pagebuf_get_one(xch, ctx, buf, fd, dom);
+
     case XC_SAVE_ID_HVM_VM86_TSS:
         /* Skip padding 4 bytes then read the vm86 TSS location. */
         if ( RDEXACT(fd, &buf->vm86_tss, sizeof(uint32_t)) ||
@@ -1460,6 +1496,12 @@ int xc_domain_restore(xc_interface *xch,
             /* should this be deferred? does it change? */
             if ( pagebuf.identpt )
                xc_set_hvm_param(xch, dom, HVM_PARAM_IDENT_PT, pagebuf.identpt);
+            if ( pagebuf.paging_ring_pfn )
+                xc_set_hvm_param(xch, dom, HVM_PARAM_PAGING_RING_PFN, pagebuf.paging_ring_pfn);
+            if ( pagebuf.access_ring_pfn )
+                xc_set_hvm_param(xch, dom, HVM_PARAM_ACCESS_RING_PFN, pagebuf.access_ring_pfn);
+            if ( pagebuf.sharing_ring_pfn )
+                xc_set_hvm_param(xch, dom, HVM_PARAM_SHARING_RING_PFN, pagebuf.sharing_ring_pfn);
             if ( pagebuf.vm86_tss )
                 xc_set_hvm_param(xch, dom, HVM_PARAM_VM86_TSS, pagebuf.vm86_tss);
             if ( pagebuf.console_pfn )
diff -r 2bc84a4a108c -r fc98c32a45c6 tools/libxc/xc_domain_save.c
--- a/tools/libxc/xc_domain_save.c
+++ b/tools/libxc/xc_domain_save.c
@@ -1639,6 +1639,42 @@ int xc_domain_save(xc_interface *xch, in
             goto out;
         }
 
+        chunk.id = XC_SAVE_ID_HVM_PAGING_RING_PFN;
+        chunk.data = 0;
+        xc_get_hvm_param(xch, dom, HVM_PARAM_PAGING_RING_PFN,
+                         (unsigned long *)&chunk.data);
+
+        if ( (chunk.data != 0) &&
+             wrexact(io_fd, &chunk, sizeof(chunk)) )
+        {
+            PERROR("Error when writing the paging ring pfn for guest");
+            goto out;
+        }
+
+        chunk.id = XC_SAVE_ID_HVM_ACCESS_RING_PFN;
+        chunk.data = 0;
+        xc_get_hvm_param(xch, dom, HVM_PARAM_ACCESS_RING_PFN,
+                         (unsigned long *)&chunk.data);
+
+        if ( (chunk.data != 0) &&
+             wrexact(io_fd, &chunk, sizeof(chunk)) )
+        {
+            PERROR("Error when writing the access ring pfn for guest");
+            goto out;
+        }
+
+        chunk.id = XC_SAVE_ID_HVM_SHARING_RING_PFN;
+        chunk.data = 0;
+        xc_get_hvm_param(xch, dom, HVM_PARAM_SHARING_RING_PFN,
+                         (unsigned long *)&chunk.data);
+
+        if ( (chunk.data != 0) &&
+             wrexact(io_fd, &chunk, sizeof(chunk)) )
+        {
+            PERROR("Error when writing the sharing ring pfn for guest");
+            goto out;
+        }
+
         chunk.id = XC_SAVE_ID_HVM_VM86_TSS;
         chunk.data = 0;
         xc_get_hvm_param(xch, dom, HVM_PARAM_VM86_TSS,
diff -r 2bc84a4a108c -r fc98c32a45c6 tools/libxc/xc_hvm_build.c
--- a/tools/libxc/xc_hvm_build.c
+++ b/tools/libxc/xc_hvm_build.c
@@ -38,12 +38,15 @@
 #define SUPERPAGE_1GB_SHIFT   18
 #define SUPERPAGE_1GB_NR_PFNS (1UL << SUPERPAGE_1GB_SHIFT)
 
-#define SPECIALPAGE_BUFIOREQ 0
-#define SPECIALPAGE_XENSTORE 1
-#define SPECIALPAGE_IOREQ    2
-#define SPECIALPAGE_IDENT_PT 3
-#define SPECIALPAGE_CONSOLE  4
-#define NR_SPECIAL_PAGES     5
+#define SPECIALPAGE_PAGING   0
+#define SPECIALPAGE_ACCESS   1
+#define SPECIALPAGE_SHARING  2
+#define SPECIALPAGE_BUFIOREQ 3
+#define SPECIALPAGE_XENSTORE 4
+#define SPECIALPAGE_IOREQ    5
+#define SPECIALPAGE_IDENT_PT 6
+#define SPECIALPAGE_CONSOLE  7
+#define NR_SPECIAL_PAGES     8
 #define special_pfn(x) (0xff000u - NR_SPECIAL_PAGES + (x))
 
 static void build_hvm_info(void *hvm_info_page, uint64_t mem_size,
@@ -356,6 +359,12 @@ static int setup_guest(xc_interface *xch
                      special_pfn(SPECIALPAGE_IOREQ));
     xc_set_hvm_param(xch, dom, HVM_PARAM_CONSOLE_PFN,
                      special_pfn(SPECIALPAGE_CONSOLE));
+    xc_set_hvm_param(xch, dom, HVM_PARAM_PAGING_RING_PFN,
+                     special_pfn(SPECIALPAGE_PAGING));
+    xc_set_hvm_param(xch, dom, HVM_PARAM_ACCESS_RING_PFN,
+                     special_pfn(SPECIALPAGE_ACCESS));
+    xc_set_hvm_param(xch, dom, HVM_PARAM_SHARING_RING_PFN,
+                     special_pfn(SPECIALPAGE_SHARING));
 
     /*
      * Identity-map page table is required for running with CR0.PG=0 when
diff -r 2bc84a4a108c -r fc98c32a45c6 tools/libxc/xc_mem_access.c
--- a/tools/libxc/xc_mem_access.c
+++ b/tools/libxc/xc_mem_access.c
@@ -25,7 +25,7 @@
 
 
 int xc_mem_access_enable(xc_interface *xch, domid_t domain_id,
-                         uint32_t *port, void *ring_page)
+                         uint32_t *port)
 {
     if ( !port )
     {
@@ -36,7 +36,7 @@ int xc_mem_access_enable(xc_interface *x
     return xc_mem_event_control(xch, domain_id,
                                 XEN_DOMCTL_MEM_EVENT_OP_ACCESS_ENABLE,
                                 XEN_DOMCTL_MEM_EVENT_OP_ACCESS,
-                                port, ring_page);
+                                port);
 }
 
 int xc_mem_access_disable(xc_interface *xch, domid_t domain_id)
@@ -44,7 +44,7 @@ int xc_mem_access_disable(xc_interface *
     return xc_mem_event_control(xch, domain_id,
                                 XEN_DOMCTL_MEM_EVENT_OP_ACCESS_DISABLE,
                                 XEN_DOMCTL_MEM_EVENT_OP_ACCESS,
-                                NULL, NULL);
+                                NULL);
 }
 
 int xc_mem_access_resume(xc_interface *xch, domid_t domain_id, unsigned long gfn)
diff -r 2bc84a4a108c -r fc98c32a45c6 tools/libxc/xc_mem_event.c
--- a/tools/libxc/xc_mem_event.c
+++ b/tools/libxc/xc_mem_event.c
@@ -24,7 +24,7 @@
 #include "xc_private.h"
 
 int xc_mem_event_control(xc_interface *xch, domid_t domain_id, unsigned int op,
-                         unsigned int mode, uint32_t *port, void *ring_page)
+                         unsigned int mode, uint32_t *port)
 {
     DECLARE_DOMCTL;
     int rc;
@@ -33,7 +33,6 @@ int xc_mem_event_control(xc_interface *x
     domctl.domain = domain_id;
     domctl.u.mem_event_op.op = op;
     domctl.u.mem_event_op.mode = mode;
-    domctl.u.mem_event_op.ring_addr = (unsigned long) ring_page;
     
     rc = do_domctl(xch, &domctl);
     if ( !rc && port )
diff -r 2bc84a4a108c -r fc98c32a45c6 tools/libxc/xc_mem_paging.c
--- a/tools/libxc/xc_mem_paging.c
+++ b/tools/libxc/xc_mem_paging.c
@@ -25,7 +25,7 @@
 
 
 int xc_mem_paging_enable(xc_interface *xch, domid_t domain_id,
-                         uint32_t *port, void *ring_page)
+                         uint32_t *port)
 {
     if ( !port )
     {
@@ -36,7 +36,7 @@ int xc_mem_paging_enable(xc_interface *x
     return xc_mem_event_control(xch, domain_id,
                                 XEN_DOMCTL_MEM_EVENT_OP_PAGING_ENABLE,
                                 XEN_DOMCTL_MEM_EVENT_OP_PAGING,
-                                port, ring_page);
+                                port);
 }
 
 int xc_mem_paging_disable(xc_interface *xch, domid_t domain_id)
@@ -44,7 +44,7 @@ int xc_mem_paging_disable(xc_interface *
     return xc_mem_event_control(xch, domain_id,
                                 XEN_DOMCTL_MEM_EVENT_OP_PAGING_DISABLE,
                                 XEN_DOMCTL_MEM_EVENT_OP_PAGING,
-                                NULL, NULL);
+                                NULL);
 }
 
 int xc_mem_paging_nominate(xc_interface *xch, domid_t domain_id, unsigned long gfn)
diff -r 2bc84a4a108c -r fc98c32a45c6 tools/libxc/xenctrl.h
--- a/tools/libxc/xenctrl.h
+++ b/tools/libxc/xenctrl.h
@@ -1897,13 +1897,12 @@ int xc_tmem_restore_extra(xc_interface *
  * mem_event operations
  */
 int xc_mem_event_control(xc_interface *xch, domid_t domain_id, unsigned int op,
-                         unsigned int mode, uint32_t *port, void *ring_page);
+                         unsigned int mode, uint32_t *port);
 int xc_mem_event_memop(xc_interface *xch, domid_t domain_id, 
                         unsigned int op, unsigned int mode,
                         uint64_t gfn, void *buffer);
 
-int xc_mem_paging_enable(xc_interface *xch, domid_t domain_id,
-                         uint32_t *port, void *ring_page);
+int xc_mem_paging_enable(xc_interface *xch, domid_t domain_id, uint32_t *port);
 int xc_mem_paging_disable(xc_interface *xch, domid_t domain_id);
 int xc_mem_paging_nominate(xc_interface *xch, domid_t domain_id,
                            unsigned long gfn);
@@ -1912,8 +1911,7 @@ int xc_mem_paging_prep(xc_interface *xch
 int xc_mem_paging_load(xc_interface *xch, domid_t domain_id, 
                         unsigned long gfn, void *buffer);
 
-int xc_mem_access_enable(xc_interface *xch, domid_t domain_id,
-                         uint32_t *port, void *ring_page);
+int xc_mem_access_enable(xc_interface *xch, domid_t domain_id, uint32_t *port);
 int xc_mem_access_disable(xc_interface *xch, domid_t domain_id);
 int xc_mem_access_resume(xc_interface *xch, domid_t domain_id,
                          unsigned long gfn);
diff -r 2bc84a4a108c -r fc98c32a45c6 tools/libxc/xg_save_restore.h
--- a/tools/libxc/xg_save_restore.h
+++ b/tools/libxc/xg_save_restore.h
@@ -254,6 +254,10 @@
 #define XC_SAVE_ID_COMPRESSED_DATA    -12 /* Marker to indicate arrival of compressed data */
 #define XC_SAVE_ID_ENABLE_COMPRESSION -13 /* Marker to enable compression logic at receiver side */
 #define XC_SAVE_ID_HVM_GENERATION_ID_ADDR -14
+/* Markers for the pfns hosting the mem event rings */
+#define XC_SAVE_ID_HVM_PAGING_RING_PFN  -15
+#define XC_SAVE_ID_HVM_ACCESS_RING_PFN  -16
+#define XC_SAVE_ID_HVM_SHARING_RING_PFN -17
 
 /*
 ** We process save/restore/migrate in batches of pages; the below
diff -r 2bc84a4a108c -r fc98c32a45c6 tools/tests/xen-access/xen-access.c
--- a/tools/tests/xen-access/xen-access.c
+++ b/tools/tests/xen-access/xen-access.c
@@ -166,36 +166,13 @@ int xc_wait_for_event_or_timeout(xc_inte
  err:
     return -errno;
 }
- 
-static void *init_page(void)
-{
-    void *buffer;
-    int ret;
-
-    /* Allocated page memory */
-    ret = posix_memalign(&buffer, PAGE_SIZE, PAGE_SIZE);
-    if ( ret != 0 )
-        goto out_alloc;
-
-    /* Lock buffer in memory so it can't be paged out */
-    ret = mlock(buffer, PAGE_SIZE);
-    if ( ret != 0 )
-        goto out_lock;
-
-    return buffer;
-
-    munlock(buffer, PAGE_SIZE);
- out_lock:
-    free(buffer);
- out_alloc:
-    return NULL;
-}
 
 xenaccess_t *xenaccess_init(xc_interface **xch_r, domid_t domain_id)
 {
     xenaccess_t *xenaccess;
     xc_interface *xch;
     int rc;
+    unsigned long ring_pfn, mmap_pfn;
 
     xch = xc_interface_open(NULL, NULL, 0);
     if ( !xch )
@@ -214,28 +191,42 @@ xenaccess_t *xenaccess_init(xc_interface
     /* Set domain id */
     xenaccess->mem_event.domain_id = domain_id;
 
-    /* Initialise ring page */
-    xenaccess->mem_event.ring_page = init_page();
-    if ( xenaccess->mem_event.ring_page == NULL )
-    {
-        ERROR("Error initialising ring page");
-        goto err;
-    }
-
-
-    /* Initialise ring */
-    SHARED_RING_INIT((mem_event_sring_t *)xenaccess->mem_event.ring_page);
-    BACK_RING_INIT(&xenaccess->mem_event.back_ring,
-                   (mem_event_sring_t *)xenaccess->mem_event.ring_page,
-                   PAGE_SIZE);
-
     /* Initialise lock */
     mem_event_ring_lock_init(&xenaccess->mem_event);
 
+    /* Map the ring page */
+    xc_get_hvm_param(xch, xenaccess->mem_event.domain_id, 
+                        HVM_PARAM_ACCESS_RING_PFN, &ring_pfn);
+    mmap_pfn = ring_pfn;
+    xenaccess->mem_event.ring_page = 
+        xc_map_foreign_batch(xch, xenaccess->mem_event.domain_id, 
+                                PROT_READ | PROT_WRITE, &mmap_pfn, 1);
+    if ( mmap_pfn & XEN_DOMCTL_PFINFO_XTAB )
+    {
+        /* Map failed, populate ring page */
+        rc = xc_domain_populate_physmap_exact(xenaccess->xc_handle, 
+                                              xenaccess->mem_event.domain_id,
+                                              1, 0, 0, &ring_pfn);
+        if ( rc != 0 )
+        {
+            PERROR("Failed to populate ring gfn\n");
+            goto err;
+        }
+
+        mmap_pfn = ring_pfn;
+        xenaccess->mem_event.ring_page = 
+            xc_map_foreign_batch(xch, xenaccess->mem_event.domain_id, 
+                                    PROT_READ | PROT_WRITE, &mmap_pfn, 1);
+        if ( mmap_pfn & XEN_DOMCTL_PFINFO_XTAB )
+        {
+            PERROR("Could not map the ring page\n");
+            goto err;
+        }
+    }
+
     /* Initialise Xen */
     rc = xc_mem_access_enable(xenaccess->xc_handle, xenaccess->mem_event.domain_id,
-                             &xenaccess->mem_event.evtchn_port,
-                             xenaccess->mem_event.ring_page);
+                             &xenaccess->mem_event.evtchn_port);
     if ( rc != 0 )
     {
         switch ( errno ) {
@@ -272,6 +263,12 @@ xenaccess_t *xenaccess_init(xc_interface
 
     xenaccess->mem_event.port = rc;
 
+    /* Initialise ring */
+    SHARED_RING_INIT((mem_event_sring_t *)xenaccess->mem_event.ring_page);
+    BACK_RING_INIT(&xenaccess->mem_event.back_ring,
+                   (mem_event_sring_t *)xenaccess->mem_event.ring_page,
+                   PAGE_SIZE);
+
     /* Get platform info */
     xenaccess->platform_info = malloc(sizeof(xc_platform_info_t));
     if ( xenaccess->platform_info == NULL )
@@ -316,8 +313,7 @@ xenaccess_t *xenaccess_init(xc_interface
     {
         if ( xenaccess->mem_event.ring_page )
         {
-            munlock(xenaccess->mem_event.ring_page, PAGE_SIZE);
-            free(xenaccess->mem_event.ring_page);
+            munmap(xenaccess->mem_event.ring_page, PAGE_SIZE);
         }
 
         free(xenaccess->platform_info);
@@ -337,6 +333,7 @@ int xenaccess_teardown(xc_interface *xch
         return 0;
 
     /* Tear down domain xenaccess in Xen */
+    munmap(xenaccess->mem_event.ring_page, PAGE_SIZE);
     rc = xc_mem_access_disable(xenaccess->xc_handle, xenaccess->mem_event.domain_id);
     if ( rc != 0 )
     {
diff -r 2bc84a4a108c -r fc98c32a45c6 tools/xenpaging/xenpaging.c
--- a/tools/xenpaging/xenpaging.c
+++ b/tools/xenpaging/xenpaging.c
@@ -281,6 +281,7 @@ static struct xenpaging *xenpaging_init(
     xentoollog_logger *dbg = NULL;
     char *p;
     int rc;
+    unsigned long ring_pfn, mmap_pfn;
 
     /* Allocate memory */
     paging = calloc(1, sizeof(struct xenpaging));
@@ -337,24 +338,39 @@ static struct xenpaging *xenpaging_init(
         goto err;
     }
 
-    /* Initialise ring page */
-    paging->mem_event.ring_page = init_page();
-    if ( paging->mem_event.ring_page == NULL )
+    /* Map the ring page */
+    xc_get_hvm_param(xch, paging->mem_event.domain_id, 
+                        HVM_PARAM_PAGING_RING_PFN, &ring_pfn);
+    mmap_pfn = ring_pfn;
+    paging->mem_event.ring_page = 
+        xc_map_foreign_batch(xch, paging->mem_event.domain_id, 
+                                PROT_READ | PROT_WRITE, &mmap_pfn, 1);
+    if ( mmap_pfn & XEN_DOMCTL_PFINFO_XTAB )
     {
-        PERROR("Error initialising ring page");
-        goto err;
+        /* Map failed, populate ring page */
+        rc = xc_domain_populate_physmap_exact(paging->xc_handle, 
+                                              paging->mem_event.domain_id,
+                                              1, 0, 0, &ring_pfn);
+        if ( rc != 0 )
+        {
+            PERROR("Failed to populate ring gfn\n");
+            goto err;
+        }
+
+        mmap_pfn = ring_pfn;
+        paging->mem_event.ring_page = 
+            xc_map_foreign_batch(xch, paging->mem_event.domain_id, 
+                                    PROT_READ | PROT_WRITE, &mmap_pfn, 1);
+        if ( mmap_pfn & XEN_DOMCTL_PFINFO_XTAB )
+        {
+            PERROR("Could not map the ring page\n");
+            goto err;
+        }
     }
-
-    /* Initialise ring */
-    SHARED_RING_INIT((mem_event_sring_t *)paging->mem_event.ring_page);
-    BACK_RING_INIT(&paging->mem_event.back_ring,
-                   (mem_event_sring_t *)paging->mem_event.ring_page,
-                   PAGE_SIZE);
     
     /* Initialise Xen */
     rc = xc_mem_paging_enable(xch, paging->mem_event.domain_id,
-                             &paging->mem_event.evtchn_port, 
-                             paging->mem_event.ring_page);
+                             &paging->mem_event.evtchn_port);
     if ( rc != 0 )
     {
         switch ( errno ) {
@@ -394,6 +410,12 @@ static struct xenpaging *xenpaging_init(
 
     paging->mem_event.port = rc;
 
+    /* Initialise ring */
+    SHARED_RING_INIT((mem_event_sring_t *)paging->mem_event.ring_page);
+    BACK_RING_INIT(&paging->mem_event.back_ring,
+                   (mem_event_sring_t *)paging->mem_event.ring_page,
+                   PAGE_SIZE);
+
     /* Get max_pages from guest if not provided via cmdline */
     if ( !paging->max_pages )
     {
@@ -469,8 +491,7 @@ static struct xenpaging *xenpaging_init(
 
         if ( paging->mem_event.ring_page )
         {
-            munlock(paging->mem_event.ring_page, PAGE_SIZE);
-            free(paging->mem_event.ring_page);
+            munmap(paging->mem_event.ring_page, PAGE_SIZE);
         }
 
         free(dom_path);
@@ -495,6 +516,7 @@ static void xenpaging_teardown(struct xe
 
     paging->xc_handle = NULL;
     /* Tear down domain paging in Xen */
+    munmap(paging->mem_event.ring_page, PAGE_SIZE);
     rc = xc_mem_paging_disable(xch, paging->mem_event.domain_id);
     if ( rc != 0 )
     {
diff -r 2bc84a4a108c -r fc98c32a45c6 xen/arch/x86/mm/mem_event.c
--- a/xen/arch/x86/mm/mem_event.c
+++ b/xen/arch/x86/mm/mem_event.c
@@ -44,16 +44,11 @@ static int mem_event_enable(
     xen_domctl_mem_event_op_t *mec,
     struct mem_event_domain *med,
     int pause_flag,
+    int param,
     xen_event_channel_notification_t notification_fn)
 {
     int rc;
-    struct domain *dom_mem_event = current->domain;
-    struct vcpu *v = current;
-    unsigned long ring_addr = mec->ring_addr;
-    l1_pgentry_t l1e;
-    unsigned long ring_gfn = 0; /* gcc ... */
-    p2m_type_t p2mt;
-    mfn_t ring_mfn;
+    unsigned long ring_gfn = d->arch.hvm_domain.params[param];
 
     /* Only one helper at a time. If the helper crashed,
      * the ring is in an undefined state and so is the guest.
@@ -61,22 +56,18 @@ static int mem_event_enable(
     if ( med->ring_page )
         return -EBUSY;
 
-    /* Get MFN of ring page */
-    guest_get_eff_l1e(v, ring_addr, &l1e);
-    ring_gfn = l1e_get_pfn(l1e);
-    ring_mfn = get_gfn(dom_mem_event, ring_gfn, &p2mt);
-
-    if ( unlikely(!mfn_valid(mfn_x(ring_mfn))) )
-    {
-        put_gfn(dom_mem_event, ring_gfn);
-        return -EINVAL;
-    }
+    /* The parameter defaults to zero, and it should be 
+     * set to something */
+    if ( ring_gfn == 0 )
+        return -ENOSYS;
 
     mem_event_ring_lock_init(med);
+    mem_event_ring_lock(med);
 
-    /* Map ring page */
-    med->ring_page = map_domain_page(mfn_x(ring_mfn));
-    put_gfn(dom_mem_event, ring_gfn);
+    rc = prepare_ring_for_helper(d, ring_gfn, &med->ring_pg_struct, 
+                                    &med->ring_page);
+    if ( rc < 0 )
+        goto err;
 
     /* Set the number of currently blocked vCPUs to 0. */
     med->blocked = 0;
@@ -101,11 +92,13 @@ static int mem_event_enable(
     /* Initialize the last-chance wait queue. */
     init_waitqueue_head(&med->wq);
 
+    mem_event_ring_unlock(med);
     return 0;
 
  err:
-    unmap_domain_page(med->ring_page);
-    med->ring_page = NULL;
+    destroy_ring_for_helper(&med->ring_page, 
+                            med->ring_pg_struct);
+    mem_event_ring_unlock(med);
 
     return rc;
 }
@@ -221,9 +214,6 @@ static int mem_event_disable(struct doma
 
         /* Free domU's event channel and leave the other one unbound */
         free_xen_event_channel(d->vcpu[0], med->xen_port);
-        
-        unmap_domain_page(med->ring_page);
-        med->ring_page = NULL;
 
         /* Unblock all vCPUs */
         for_each_vcpu ( d, v )
@@ -235,6 +225,8 @@ static int mem_event_disable(struct doma
             }
         }
 
+        destroy_ring_for_helper(&med->ring_page, 
+                                med->ring_pg_struct);
         mem_event_ring_unlock(med);
     }
 
@@ -549,7 +541,9 @@ int mem_event_domctl(struct domain *d, x
             if ( p2m->pod.entry_count )
                 break;
 
-            rc = mem_event_enable(d, mec, med, _VPF_mem_paging, mem_paging_notification);
+            rc = mem_event_enable(d, mec, med, _VPF_mem_paging, 
+                                    HVM_PARAM_PAGING_RING_PFN,
+                                    mem_paging_notification);
         }
         break;
 
@@ -585,7 +579,9 @@ int mem_event_domctl(struct domain *d, x
             if ( boot_cpu_data.x86_vendor != X86_VENDOR_INTEL )
                 break;
 
-            rc = mem_event_enable(d, mec, med, _VPF_mem_access, mem_access_notification);
+            rc = mem_event_enable(d, mec, med, _VPF_mem_access, 
+                                    HVM_PARAM_ACCESS_RING_PFN,
+                                    mem_access_notification);
         }
         break;
 
diff -r 2bc84a4a108c -r fc98c32a45c6 xen/include/public/domctl.h
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -748,7 +748,6 @@ struct xen_domctl_mem_event_op {
     uint32_t       mode;         /* XEN_DOMCTL_MEM_EVENT_OP_* */
 
     uint32_t port;              /* OUT: event channel for ring */
-    uint64_aligned_t ring_addr; /* IN:  Virtual address of ring page */
 };
 typedef struct xen_domctl_mem_event_op xen_domctl_mem_event_op_t;
 DEFINE_XEN_GUEST_HANDLE(xen_domctl_mem_event_op_t);
diff -r 2bc84a4a108c -r fc98c32a45c6 xen/include/public/hvm/params.h
--- a/xen/include/public/hvm/params.h
+++ b/xen/include/public/hvm/params.h
@@ -149,6 +149,11 @@
 /* Boolean: Enable nestedhvm (hvm only) */
 #define HVM_PARAM_NESTEDHVM    24
 
-#define HVM_NR_PARAMS          27
+/* Params for the mem event rings */
+#define HVM_PARAM_PAGING_RING_PFN   27
+#define HVM_PARAM_ACCESS_RING_PFN   28
+#define HVM_PARAM_SHARING_RING_PFN  29
+
+#define HVM_NR_PARAMS          30
 
 #endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */
diff -r 2bc84a4a108c -r fc98c32a45c6 xen/include/xen/sched.h
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -192,6 +192,7 @@ struct mem_event_domain
     unsigned char target_producers;
     /* shared ring page */
     void *ring_page;
+    struct page_info *ring_pg_struct;
     /* front-end ring */
     mem_event_front_ring_t front_ring;
     /* event channel port (vcpu0 only) */

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel