
[Xen-devel] [PATCH v0 2/3] mem_event: Add a helper API to tear down a mem_event setup and unmap the ring page



tools/libxc/xc_mem_event.c: Add a new generic API to tear down a mem_event
setup. The API handles the HVM params PAGING, ACCESS and SHARING, and also
unmaps the ring page.

tools/libxc/xc_mem_access.c: Modify xc_mem_access_disable to use the new
teardown API.

tools/tests/xen-access/: Update the code to use the new API.
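
For illustration, a minimal (hypothetical) caller sketch of the intended
enable/disable pairing under the new signature. It assumes, per the comment
on xc_mem_access_enable, that the enable call fills in the caller's ring
page pointer (hence the &ring_page argument), and that teardown returns
-errno on failure; the domain ID handling and error reporting here are
illustrative only, not part of the patch.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <xenctrl.h>

    int main(int argc, char *argv[])
    {
        xc_interface *xch;
        domid_t domain_id;
        uint32_t port;
        void *ring_page = NULL;   /* assumed filled in by the enable call */
        mem_event_back_ring_t back_ring;
        int rc;

        if ( argc != 2 )
        {
            fprintf(stderr, "usage: %s <domain_id>\n", argv[0]);
            return 1;
        }
        domain_id = atoi(argv[1]);

        xch = xc_interface_open(NULL, NULL, 0);
        if ( !xch )
            return 1;

        rc = xc_mem_access_enable(xch, domain_id, &port,
                                  &ring_page, &back_ring);
        if ( rc != 0 )
            goto out;

        /* ... consume requests from back_ring and post responses ... */

        /*
         * The teardown path now unmaps the ring page itself, so the
         * caller passes it in instead of calling munmap() directly.
         */
        rc = xc_mem_access_disable(xch, domain_id, ring_page);
        if ( rc < 0 )
            fprintf(stderr, "teardown failed: %s\n", strerror(-rc));

     out:
        xc_interface_close(xch);
        return rc ? 1 : 0;
    }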

Signed-off-by: Dushyant Behl <myselfdushyantbehl@xxxxxxxxx>
---
 tools/libxc/xc_mem_access.c         |  9 +++---
 tools/libxc/xc_mem_event.c          | 57 +++++++++++++++++++++++++++++++
 tools/libxc/xc_private.h            |  8 +++++
 tools/libxc/xenctrl.h               |  3 +-
 tools/tests/xen-access/xen-access.c |  6 ++--
 5 files changed, 72 insertions(+), 11 deletions(-)

diff --git a/tools/libxc/xc_mem_access.c b/tools/libxc/xc_mem_access.c
index 89050be..ea24689 100644
--- a/tools/libxc/xc_mem_access.c
+++ b/tools/libxc/xc_mem_access.c
@@ -33,12 +33,11 @@ int xc_mem_access_enable(xc_interface *xch, domid_t domain_id,
                                port, ring_page, back_ring);
 }
 
-int xc_mem_access_disable(xc_interface *xch, domid_t domain_id)
+int xc_mem_access_disable(xc_interface *xch, domid_t domain_id, void *ring_page)
 {
-    return xc_mem_event_control(xch, domain_id,
-                                XEN_DOMCTL_MEM_EVENT_OP_ACCESS_DISABLE,
-                                XEN_DOMCTL_MEM_EVENT_OP_ACCESS,
-                                NULL);
+    return xc_mem_event_teardown(xch, domain_id,
+                                 HVM_PARAM_ACCESS_RING_PFN,
+                                 ring_page);
 }
 
 int xc_mem_access_resume(xc_interface *xch, domid_t domain_id)
diff --git a/tools/libxc/xc_mem_event.c b/tools/libxc/xc_mem_event.c
index 3525a83..6cd1894 100644
--- a/tools/libxc/xc_mem_event.c
+++ b/tools/libxc/xc_mem_event.c
@@ -196,3 +196,60 @@ int xc_mem_event_enable(xc_interface *xch, domid_t domain_id, int param,
 
     return rc1;
 }
+
+/*
+ * Tear down a mem_event setup and unmap its ring page.
+ * Returns 0 on success; on failure returns -errno, with errno set by the
+ * failing call. param can be HVM_PARAM_PAGING/ACCESS/SHARING_RING_PFN.
+ */
+int xc_mem_event_teardown(xc_interface *xch, domid_t domain_id,
+                          int param, void *ring_page)
+{
+    int rc;
+    unsigned int op, mode;
+
+    switch ( param )
+    {
+        case HVM_PARAM_PAGING_RING_PFN:
+            op = XEN_DOMCTL_MEM_EVENT_OP_PAGING_DISABLE;
+            mode = XEN_DOMCTL_MEM_EVENT_OP_PAGING;
+            break;
+
+        case HVM_PARAM_ACCESS_RING_PFN:
+            op = XEN_DOMCTL_MEM_EVENT_OP_ACCESS_DISABLE;
+            mode = XEN_DOMCTL_MEM_EVENT_OP_ACCESS;
+            break;
+
+        case HVM_PARAM_SHARING_RING_PFN:
+            op = XEN_DOMCTL_MEM_EVENT_OP_SHARING_DISABLE;
+            mode = XEN_DOMCTL_MEM_EVENT_OP_SHARING;
+            break;
+
+        /*
+         * Handle the outside chance that the HVM param is valid but is
+         * not a mem_event ring param.
+         */
+        default:
+            errno = EINVAL;
+            rc = -1;
+            goto out;
+    }
+
+    /* Remove the ring page. */
+    rc = munmap(ring_page, PAGE_SIZE);
+    if ( rc < 0 )
+        PERROR("Error while disabling paging in xen");
+
+    rc = xc_mem_event_control(xch, domain_id, op, mode, NULL);
+    if ( rc != 0 )
+    {
+        PERROR("Failed to disable mem_event\n");
+        goto out;
+    }
+
+  out:
+    if ( rc != 0 )
+        rc = -errno;
+
+    return rc;
+}
diff --git a/tools/libxc/xc_private.h b/tools/libxc/xc_private.h
index cf9b223..7120a08 100644
--- a/tools/libxc/xc_private.h
+++ b/tools/libxc/xc_private.h
@@ -387,4 +387,12 @@ int xc_mem_event_enable(xc_interface *xch, domid_t domain_id, int param,
                         uint32_t *port, void *ring_page,
                         mem_event_back_ring_t *back_ring);
 
+/*
+ * Tear down a mem_event setup and unmap its ring page.
+ * Returns 0 on success; on failure returns -errno, with errno set by the
+ * failing call. param can be HVM_PARAM_PAGING/ACCESS/SHARING_RING_PFN.
+ */
+int xc_mem_event_teardown(xc_interface *xch, domid_t domain_id,
+                          int param, void *ring_page);
+
 #endif /* __XC_PRIVATE_H__ */
diff --git a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h
index d21f026..cfd6019 100644
--- a/tools/libxc/xenctrl.h
+++ b/tools/libxc/xenctrl.h
@@ -2261,12 +2261,11 @@ int xc_mem_paging_load(xc_interface *xch, domid_t domain_id,
 /*
  * Enables mem_access and sets arg ring page equal to mapped page.
  * Will return 0 on success and -errno on error.
- * Caller has to unmap this page when done.
  */
 int xc_mem_access_enable(xc_interface *xch, domid_t domain_id,
                          uint32_t *port, void *ring_page,
                          mem_event_back_ring_t *back_ring);
-int xc_mem_access_disable(xc_interface *xch, domid_t domain_id);
+int xc_mem_access_disable(xc_interface *xch, domid_t domain_id, void *ring_page);
 int xc_mem_access_resume(xc_interface *xch, domid_t domain_id);
 
 /*
diff --git a/tools/tests/xen-access/xen-access.c b/tools/tests/xen-access/xen-access.c
index 9242f86..a4ef578 100644
--- a/tools/tests/xen-access/xen-access.c
+++ b/tools/tests/xen-access/xen-access.c
@@ -170,13 +170,11 @@ int xenaccess_teardown(xc_interface *xch, xenaccess_t *xenaccess)
         return 0;
 
     /* Tear down domain xenaccess in Xen */
-    if ( xenaccess->mem_event.ring_page )
-        munmap(xenaccess->mem_event.ring_page, XC_PAGE_SIZE);
-
     if ( mem_access_enable )
     {
         rc = xc_mem_access_disable(xenaccess->xc_handle,
-                                   xenaccess->mem_event.domain_id);
+                                   xenaccess->mem_event.domain_id,
+                                   xenaccess->mem_event.ring_page);
         if ( rc != 0 )
         {
             ERROR("Error tearing down domain xenaccess in xen");
-- 
1.9.1

