
[Xen-devel] [PATCH 12 of 24] libxc: convert domctl interfaces over to hypercall buffers



# HG changeset patch
# User Ian Campbell <ian.campbell@xxxxxxxxxx>
# Date 1283779691 -3600
# Node ID 7f735088ac1d169d140d978441c772b62083fae0
# Parent  f3b26cbd7eb5cc0ce7321aaae9eefb821192e86f
libxc: convert domctl interfaces over to hypercall buffers

(defer save/restore and shadow-related interfaces until a later patch)

Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
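
For readers unfamiliar with the new API, here is a minimal sketch of the
pattern the conversions below follow. It is illustrative only and not part of
the patch: the wrapper name example_query_pfns is hypothetical, while
DECLARE_HYPERCALL_BOUNCE, xc_hypercall_bounce_pre/post and
xc_set_xen_guest_handle are the calls introduced earlier in this series and
used throughout the diff. The sketch assumes it lives inside libxc (i.e.
xc_private.h is included). A caller-supplied array is bounced into
hypercall-safe memory around the domctl instead of being pinned in place with
lock_pages()/unlock_pages():

    static int example_query_pfns(xc_interface *xch, uint32_t dom,
                                  unsigned int num, xen_pfn_t *arr)
    {
        int rc;
        DECLARE_DOMCTL;
        /* Bounce the caller's array into and out of hypercall-safe memory. */
        DECLARE_HYPERCALL_BOUNCE(arr, sizeof(*arr) * num,
                                 XC_HYPERCALL_BUFFER_BOUNCE_BOTH);

        if ( xc_hypercall_bounce_pre(xch, arr) )    /* allocate + copy in */
            return -1;

        domctl.cmd = XEN_DOMCTL_getpageframeinfo3;
        domctl.domain = (domid_t)dom;
        domctl.u.getpageframeinfo3.num = num;
        xc_set_xen_guest_handle(domctl.u.getpageframeinfo3.array, arr);

        rc = do_domctl(xch, &domctl);

        xc_hypercall_bounce_post(xch, arr);         /* copy out + free */
        return rc;
    }

Buffers that libxc allocates itself (rather than receiving from the caller)
instead use DECLARE_HYPERCALL_BUFFER together with
xc_hypercall_buffer_alloc()/xc_hypercall_buffer_free(), as in
xc_vcpu_setaffinity() below.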

diff -r f3b26cbd7eb5 -r 7f735088ac1d tools/libxc/ia64/xc_ia64_stubs.c
--- a/tools/libxc/ia64/xc_ia64_stubs.c  Mon Sep 06 14:28:11 2010 +0100
+++ b/tools/libxc/ia64/xc_ia64_stubs.c  Mon Sep 06 14:28:11 2010 +0100
@@ -46,7 +46,7 @@ xc_ia64_get_pfn_list(xc_interface *xch, 
     domctl.u.getmemlist.max_pfns = nr_pages;
     domctl.u.getmemlist.start_pfn = start_page;
     domctl.u.getmemlist.num_pfns = 0;
-    set_xen_guest_handle(domctl.u.getmemlist.buffer, pfn_buf);
+    xc_set_xen_guest_handle(domctl.u.getmemlist.buffer, pfn_buf);
 
     if (lock_pages(pfn_buf, nr_pages * sizeof(xen_pfn_t)) != 0) {
         PERROR("Could not lock pfn list buffer");
diff -r f3b26cbd7eb5 -r 7f735088ac1d tools/libxc/xc_dom_boot.c
--- a/tools/libxc/xc_dom_boot.c Mon Sep 06 14:28:11 2010 +0100
+++ b/tools/libxc/xc_dom_boot.c Mon Sep 06 14:28:11 2010 +0100
@@ -61,9 +61,10 @@ static int setup_hypercall_page(struct x
     return rc;
 }
 
-static int launch_vm(xc_interface *xch, domid_t domid, void *ctxt)
+static int launch_vm(xc_interface *xch, domid_t domid, xc_hypercall_buffer_t *ctxt)
 {
     DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BUFFER_ARGUMENT(ctxt);
     int rc;
 
     xc_dom_printf(xch, "%s: called, ctxt=%p", __FUNCTION__, ctxt);
@@ -71,7 +72,7 @@ static int launch_vm(xc_interface *xch, 
     domctl.cmd = XEN_DOMCTL_setvcpucontext;
     domctl.domain = domid;
     domctl.u.vcpucontext.vcpu = 0;
-    set_xen_guest_handle(domctl.u.vcpucontext.ctxt, ctxt);
+    xc_set_xen_guest_handle(domctl.u.vcpucontext.ctxt, ctxt);
     rc = do_domctl(xch, &domctl);
     if ( rc != 0 )
         xc_dom_panic(xch, XC_INTERNAL_ERROR,
@@ -202,8 +203,12 @@ int xc_dom_boot_image(struct xc_dom_imag
 int xc_dom_boot_image(struct xc_dom_image *dom)
 {
     DECLARE_DOMCTL;
-    vcpu_guest_context_any_t ctxt;
+    DECLARE_HYPERCALL_BUFFER(vcpu_guest_context_any_t, ctxt);
     int rc;
+
+    ctxt = xc_hypercall_buffer_alloc(dom->xch, ctxt, sizeof(*ctxt));
+    if ( ctxt == NULL )
+        return -1;
 
     DOMPRINTF_CALLED(dom->xch);
 
@@ -260,12 +265,13 @@ int xc_dom_boot_image(struct xc_dom_imag
         return rc;
 
     /* let the vm run */
-    memset(&ctxt, 0, sizeof(ctxt));
-    if ( (rc = dom->arch_hooks->vcpu(dom, &ctxt)) != 0 )
+    memset(ctxt, 0, sizeof(*ctxt));
+    if ( (rc = dom->arch_hooks->vcpu(dom, ctxt)) != 0 )
         return rc;
     xc_dom_unmap_all(dom);
-    rc = launch_vm(dom->xch, dom->guest_domid, &ctxt);
+    rc = launch_vm(dom->xch, dom->guest_domid, HYPERCALL_BUFFER(ctxt));
 
+    xc_hypercall_buffer_free(dom->xch, ctxt);
     return rc;
 }
 
diff -r f3b26cbd7eb5 -r 7f735088ac1d tools/libxc/xc_domain.c
--- a/tools/libxc/xc_domain.c   Mon Sep 06 14:28:11 2010 +0100
+++ b/tools/libxc/xc_domain.c   Mon Sep 06 14:28:11 2010 +0100
@@ -115,36 +115,31 @@ int xc_vcpu_setaffinity(xc_interface *xc
                         uint64_t *cpumap, int cpusize)
 {
     DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BUFFER(uint8_t, local);
     int ret = -1;
-    uint8_t *local = malloc(cpusize); 
 
-    if(local == NULL)
+    local = xc_hypercall_buffer_alloc(xch, local, cpusize);
+    if ( local == NULL )
     {
-        PERROR("Could not alloc memory for Xen hypercall");
+        PERROR("Could not alloc locked memory for Xen hypercall");
         goto out;
     }
+
     domctl.cmd = XEN_DOMCTL_setvcpuaffinity;
     domctl.domain = (domid_t)domid;
     domctl.u.vcpuaffinity.vcpu    = vcpu;
 
     bitmap_64_to_byte(local, cpumap, cpusize * 8);
 
-    set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
+    xc_set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
 
     domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8;
-    
-    if ( lock_pages(xch, local, cpusize) != 0 )
-    {
-        PERROR("Could not lock memory for Xen hypercall");
-        goto out;
-    }
 
     ret = do_domctl(xch, &domctl);
 
-    unlock_pages(xch, local, cpusize);
+    xc_hypercall_buffer_free(xch, local);
 
  out:
-    free(local);
     return ret;
 }
 
@@ -155,9 +150,10 @@ int xc_vcpu_getaffinity(xc_interface *xc
                         uint64_t *cpumap, int cpusize)
 {
     DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BUFFER(uint8_t, local);
     int ret = -1;
-    uint8_t * local = malloc(cpusize);
 
+    local = xc_hypercall_buffer_alloc(xch, local, cpusize);
     if(local == NULL)
     {
         PERROR("Could not alloc memory for Xen hypercall");
@@ -168,22 +164,15 @@ int xc_vcpu_getaffinity(xc_interface *xc
     domctl.domain = (domid_t)domid;
     domctl.u.vcpuaffinity.vcpu = vcpu;
 
-
-    set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
+    xc_set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
     domctl.u.vcpuaffinity.cpumap.nr_cpus = cpusize * 8;
-    
-    if ( lock_pages(xch, local, sizeof(local)) != 0 )
-    {
-        PERROR("Could not lock memory for Xen hypercall");
-        goto out;
-    }
 
     ret = do_domctl(xch, &domctl);
 
-    unlock_pages(xch, local, sizeof (local));
     bitmap_byte_to_64(cpumap, local, cpusize * 8);
+
+    xc_hypercall_buffer_free(xch, local);
 out:
-    free(local);
     return ret;
 }
 
@@ -283,20 +272,23 @@ int xc_domain_hvm_getcontext(xc_interfac
 {
     int ret;
     DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BOUNCE(ctxt_buf, size, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+
+    if ( ctxt_buf && xc_hypercall_bounce_pre(xch, ctxt_buf) )
+        return -1;
 
     domctl.cmd = XEN_DOMCTL_gethvmcontext;
     domctl.domain = (domid_t)domid;
     domctl.u.hvmcontext.size = size;
-    set_xen_guest_handle(domctl.u.hvmcontext.buffer, ctxt_buf);
-
-    if ( ctxt_buf ) 
-        if ( (ret = lock_pages(xch, ctxt_buf, size)) != 0 )
-            return ret;
+    if ( ctxt_buf )
+        xc_set_xen_guest_handle(domctl.u.hvmcontext.buffer, ctxt_buf);
+    else
+        xc_set_xen_guest_handle(domctl.u.hvmcontext.buffer, HYPERCALL_BUFFER_NULL);
 
     ret = do_domctl(xch, &domctl);
 
-    if ( ctxt_buf ) 
-        unlock_pages(xch, ctxt_buf, size);
+    if ( ctxt_buf )
+        xc_hypercall_bounce_post(xch, ctxt_buf);
 
     return (ret < 0 ? -1 : domctl.u.hvmcontext.size);
 }
@@ -312,23 +304,21 @@ int xc_domain_hvm_getcontext_partial(xc_
 {
     int ret;
     DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BOUNCE(ctxt_buf, size, XC_HYPERCALL_BUFFER_BOUNCE_OUT);
 
-    if ( !ctxt_buf ) 
-        return -EINVAL;
+    if ( !ctxt_buf || xc_hypercall_bounce_pre(xch, ctxt_buf) )
+        return -1;
 
     domctl.cmd = XEN_DOMCTL_gethvmcontext_partial;
     domctl.domain = (domid_t) domid;
     domctl.u.hvmcontext_partial.type = typecode;
     domctl.u.hvmcontext_partial.instance = instance;
-    set_xen_guest_handle(domctl.u.hvmcontext_partial.buffer, ctxt_buf);
+    xc_set_xen_guest_handle(domctl.u.hvmcontext_partial.buffer, ctxt_buf);
 
-    if ( (ret = lock_pages(xch, ctxt_buf, size)) != 0 )
-        return ret;
-    
     ret = do_domctl(xch, &domctl);
 
-    if ( ctxt_buf ) 
-        unlock_pages(xch, ctxt_buf, size);
+    if ( ctxt_buf )
+        xc_hypercall_bounce_post(xch, ctxt_buf);
 
     return ret ? -1 : 0;
 }
@@ -341,18 +331,19 @@ int xc_domain_hvm_setcontext(xc_interfac
 {
     int ret;
     DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BOUNCE(ctxt_buf, size, XC_HYPERCALL_BUFFER_BOUNCE_IN);
+
+    if ( xc_hypercall_bounce_pre(xch, ctxt_buf) )
+        return -1;
 
     domctl.cmd = XEN_DOMCTL_sethvmcontext;
     domctl.domain = domid;
     domctl.u.hvmcontext.size = size;
-    set_xen_guest_handle(domctl.u.hvmcontext.buffer, ctxt_buf);
-
-    if ( (ret = lock_pages(xch, ctxt_buf, size)) != 0 )
-        return ret;
+    xc_set_xen_guest_handle(domctl.u.hvmcontext.buffer, ctxt_buf);
 
     ret = do_domctl(xch, &domctl);
 
-    unlock_pages(xch, ctxt_buf, size);
+    xc_hypercall_bounce_post(xch, ctxt_buf);
 
     return ret;
 }
@@ -364,18 +355,19 @@ int xc_vcpu_getcontext(xc_interface *xch
 {
     int rc;
     DECLARE_DOMCTL;
-    size_t sz = sizeof(vcpu_guest_context_any_t);
+    DECLARE_HYPERCALL_BOUNCE(ctxt, sizeof(vcpu_guest_context_any_t), XC_HYPERCALL_BUFFER_BOUNCE_OUT);
+
+    if ( xc_hypercall_bounce_pre(xch, ctxt) )
+        return -1;
 
     domctl.cmd = XEN_DOMCTL_getvcpucontext;
     domctl.domain = (domid_t)domid;
     domctl.u.vcpucontext.vcpu   = (uint16_t)vcpu;
-    set_xen_guest_handle(domctl.u.vcpucontext.ctxt, &ctxt->c);
+    xc_set_xen_guest_handle(domctl.u.vcpucontext.ctxt, ctxt);
 
-    
-    if ( (rc = lock_pages(xch, ctxt, sz)) != 0 )
-        return rc;
     rc = do_domctl(xch, &domctl);
-    unlock_pages(xch, ctxt, sz);
+
+    xc_hypercall_bounce_post(xch, ctxt);
 
     return rc;
 }
@@ -559,22 +551,24 @@ int xc_domain_get_tsc_info(xc_interface 
 {
     int rc;
     DECLARE_DOMCTL;
-    xen_guest_tsc_info_t info = { 0 };
+    DECLARE_HYPERCALL_BUFFER(xen_guest_tsc_info_t, info);
+
+    info = xc_hypercall_buffer_alloc(xch, info, sizeof(*info));
+    if ( info == NULL )
+        return -ENOMEM;
 
     domctl.cmd = XEN_DOMCTL_gettscinfo;
     domctl.domain = (domid_t)domid;
-    set_xen_guest_handle(domctl.u.tsc_info.out_info, &info);
-    if ( (rc = lock_pages(xch, &info, sizeof(info))) != 0 )
-        return rc;
+    xc_set_xen_guest_handle(domctl.u.tsc_info.out_info, info);
     rc = do_domctl(xch, &domctl);
     if ( rc == 0 )
     {
-        *tsc_mode = info.tsc_mode;
-        *elapsed_nsec = info.elapsed_nsec;
-        *gtsc_khz = info.gtsc_khz;
-        *incarnation = info.incarnation;
+        *tsc_mode = info->tsc_mode;
+        *elapsed_nsec = info->elapsed_nsec;
+        *gtsc_khz = info->gtsc_khz;
+        *incarnation = info->incarnation;
     }
-    unlock_pages(xch, &info,sizeof(info));
+    xc_hypercall_buffer_free(xch, info);
     return rc;
 }
 
@@ -840,8 +834,8 @@ int xc_vcpu_setcontext(xc_interface *xch
                        vcpu_guest_context_any_t *ctxt)
 {
     DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BOUNCE(ctxt, sizeof(vcpu_guest_context_any_t), XC_HYPERCALL_BUFFER_BOUNCE_IN);
     int rc;
-    size_t sz = sizeof(vcpu_guest_context_any_t);
 
     if (ctxt == NULL)
     {
@@ -849,16 +843,17 @@ int xc_vcpu_setcontext(xc_interface *xch
         return -1;
     }
 
+    if ( xc_hypercall_bounce_pre(xch, ctxt) )
+        return -1;
+
     domctl.cmd = XEN_DOMCTL_setvcpucontext;
     domctl.domain = domid;
     domctl.u.vcpucontext.vcpu = vcpu;
-    set_xen_guest_handle(domctl.u.vcpucontext.ctxt, &ctxt->c);
+    xc_set_xen_guest_handle(domctl.u.vcpucontext.ctxt, ctxt);
 
-    if ( (rc = lock_pages(xch, ctxt, sz)) != 0 )
-        return rc;
     rc = do_domctl(xch, &domctl);
-    
-    unlock_pages(xch, ctxt, sz);
+
+    xc_hypercall_bounce_post(xch, ctxt);
 
     return rc;
 }
@@ -984,6 +979,13 @@ int xc_get_device_group(
 {
     int rc;
     DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BOUNCE(sdev_array, max_sdevs * sizeof(*sdev_array), XC_HYPERCALL_BUFFER_BOUNCE_IN);
+
+    if ( xc_hypercall_bounce_pre(xch, sdev_array) )
+    {
+        PERROR("Could not lock memory for xc_get_device_group");
+        return -1;
+    }
 
     domctl.cmd = XEN_DOMCTL_get_device_group;
     domctl.domain = (domid_t)domid;
@@ -991,17 +993,14 @@ int xc_get_device_group(
     domctl.u.get_device_group.machine_bdf = machine_bdf;
     domctl.u.get_device_group.max_sdevs = max_sdevs;
 
-    set_xen_guest_handle(domctl.u.get_device_group.sdev_array, sdev_array);
+    xc_set_xen_guest_handle(domctl.u.get_device_group.sdev_array, sdev_array);
 
-    if ( lock_pages(xch, sdev_array, max_sdevs * sizeof(*sdev_array)) != 0 )
-    {
-        PERROR("Could not lock memory for xc_get_device_group");
-        return -ENOMEM;
-    }
     rc = do_domctl(xch, &domctl);
-    unlock_pages(xch, sdev_array, max_sdevs * sizeof(*sdev_array));
 
     *num_sdevs = domctl.u.get_device_group.num_sdevs;
+
+    xc_hypercall_bounce_post(xch, sdev_array);
+
     return rc;
 }
 
diff -r f3b26cbd7eb5 -r 7f735088ac1d tools/libxc/xc_private.c
--- a/tools/libxc/xc_private.c  Mon Sep 06 14:28:11 2010 +0100
+++ b/tools/libxc/xc_private.c  Mon Sep 06 14:28:11 2010 +0100
@@ -322,12 +322,18 @@ int xc_get_pfn_type_batch(xc_interface *
 int xc_get_pfn_type_batch(xc_interface *xch, uint32_t dom,
                           unsigned int num, xen_pfn_t *arr)
 {
+    int rc;
     DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BOUNCE(arr, sizeof(*arr) * num, XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
+    if ( xc_hypercall_bounce_pre(xch, arr) )
+        return -1;
     domctl.cmd = XEN_DOMCTL_getpageframeinfo3;
     domctl.domain = (domid_t)dom;
     domctl.u.getpageframeinfo3.num = num;
-    set_xen_guest_handle(domctl.u.getpageframeinfo3.array, arr);
-    return do_domctl(xch, &domctl);
+    xc_set_xen_guest_handle(domctl.u.getpageframeinfo3.array, arr);
+    rc = do_domctl(xch, &domctl);
+    xc_hypercall_bounce_post(xch, arr);
+    return rc;
 }
 
 int xc_mmuext_op(
@@ -557,25 +563,27 @@ int xc_get_pfn_list(xc_interface *xch,
                     unsigned long max_pfns)
 {
     DECLARE_DOMCTL;
+    DECLARE_HYPERCALL_BOUNCE(pfn_buf, max_pfns * sizeof(*pfn_buf), XC_HYPERCALL_BUFFER_BOUNCE_OUT);
     int ret;
-    domctl.cmd = XEN_DOMCTL_getmemlist;
-    domctl.domain   = (domid_t)domid;
-    domctl.u.getmemlist.max_pfns = max_pfns;
-    set_xen_guest_handle(domctl.u.getmemlist.buffer, pfn_buf);
 
 #ifdef VALGRIND
     memset(pfn_buf, 0, max_pfns * sizeof(*pfn_buf));
 #endif
 
-    if ( lock_pages(xch, pfn_buf, max_pfns * sizeof(*pfn_buf)) != 0 )
+    if ( xc_hypercall_bounce_pre(xch, pfn_buf) )
     {
         PERROR("xc_get_pfn_list: pfn_buf lock failed");
         return -1;
     }
 
+    domctl.cmd = XEN_DOMCTL_getmemlist;
+    domctl.domain   = (domid_t)domid;
+    domctl.u.getmemlist.max_pfns = max_pfns;
+    xc_set_xen_guest_handle(domctl.u.getmemlist.buffer, pfn_buf);
+
     ret = do_domctl(xch, &domctl);
 
-    unlock_pages(xch, pfn_buf, max_pfns * sizeof(*pfn_buf));
+    xc_hypercall_bounce_post(xch, pfn_buf);
 
     return (ret < 0) ? -1 : domctl.u.getmemlist.num_pfns;
 }
diff -r f3b26cbd7eb5 -r 7f735088ac1d tools/libxc/xc_private.h
--- a/tools/libxc/xc_private.h  Mon Sep 06 14:28:11 2010 +0100
+++ b/tools/libxc/xc_private.h  Mon Sep 06 14:28:11 2010 +0100
@@ -209,17 +209,18 @@ static inline int do_domctl(xc_interface
 {
     int ret = -1;
     DECLARE_HYPERCALL;
+    DECLARE_HYPERCALL_BOUNCE(domctl, sizeof(*domctl), XC_HYPERCALL_BUFFER_BOUNCE_BOTH);
 
-    if ( hcall_buf_prep(xch, (void **)&domctl, sizeof(*domctl)) != 0 )
+    domctl->interface_version = XEN_DOMCTL_INTERFACE_VERSION;
+
+    if ( xc_hypercall_bounce_pre(xch, domctl) )
     {
         PERROR("Could not lock memory for Xen hypercall");
         goto out1;
     }
 
-    domctl->interface_version = XEN_DOMCTL_INTERFACE_VERSION;
-
     hypercall.op     = __HYPERVISOR_domctl;
-    hypercall.arg[0] = (unsigned long)domctl;
+    hypercall.arg[0] = HYPERCALL_BUFFER_AS_ARG(domctl);
 
     if ( (ret = do_xen_hypercall(xch, &hypercall)) < 0 )
     {
@@ -228,8 +229,7 @@ static inline int do_domctl(xc_interface
                     " rebuild the user-space tool set?\n");
     }
 
-    hcall_buf_release(xch, (void **)&domctl, sizeof(*domctl));
-
+    xc_hypercall_bounce_post(xch, domctl);
  out1:
     return ret;
 }
diff -r f3b26cbd7eb5 -r 7f735088ac1d tools/libxc/xc_resume.c
--- a/tools/libxc/xc_resume.c   Mon Sep 06 14:28:11 2010 +0100
+++ b/tools/libxc/xc_resume.c   Mon Sep 06 14:28:11 2010 +0100
@@ -196,12 +196,6 @@ static int xc_domain_resume_any(xc_inter
         goto out;
     }
 
-    if ( lock_pages(xch, &ctxt, sizeof(ctxt)) )
-    {
-        ERROR("Unable to lock ctxt");
-        goto out;
-    }
-
     if ( xc_vcpu_getcontext(xch, domid, 0, &ctxt) )
     {
         ERROR("Could not get vcpu context");
@@ -235,7 +229,6 @@ static int xc_domain_resume_any(xc_inter
 
 #if defined(__i386__) || defined(__x86_64__)
  out:
-    unlock_pages(xch, (void *)&ctxt, sizeof ctxt);
     if (p2m)
         munmap(p2m, P2M_FL_ENTRIES*PAGE_SIZE);
     if (p2m_frame_list)
