
[Xen-devel] [PATCH v3 10/14] libxc: get and set soft and hard affinity



by using the new flag introduced in the parameters of the
DOMCTL_{get,set}vcpuaffinity hypercalls.

This is done by adding a new parameter (flags) to
xc_vcpu_setaffinity() and xc_vcpu_getaffinity(), so that the
caller can specify whether to act on the soft affinity, the
hard affinity, or both.

In case of setting both hard and soft, they are set to the
same cpumap. xc_vcpu_setaffinity() also takes another new param,
for reporting back to the caller the actual affinity that the
scheduler will use after a successful call.
In case of asking to get both hard and soft, what the caller
gets is the intersection between them.
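
As a purely illustrative sketch (not part of this patch), a caller
could use the new interface roughly as follows; this assumes an open
xch handle plus a valid domid/vcpu, pokes the cpumap bytes directly,
and omits all error checking:

  xc_cpumap_t cpumap = xc_cpumap_alloc(xch);   /* affinity to be set */
  xc_cpumap_t ecpumap = xc_cpumap_alloc(xch);  /* effective affinity, out */

  cpumap[0] |= 1 << 2;                         /* pcpu 2 */

  /* Set hard and soft affinity to the same map, and learn what the
   * scheduler will actually use. */
  xc_vcpu_setaffinity(xch, domid, vcpu, cpumap,
                      XEN_VCPUAFFINITY_HARD | XEN_VCPUAFFINITY_SOFT,
                      ecpumap);

  /* Read back only the soft affinity. */
  xc_vcpu_getaffinity(xch, domid, vcpu, XEN_VCPUAFFINITY_SOFT, cpumap);

  free(cpumap);
  free(ecpumap);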

In-tree callers are also fixed to cope with the new interface.

Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
---
Changes from v2:
 * better cleanup logic in _vcpu_setaffinity() (regarding
   xc_hypercall_buffer_{alloc,free}()), as suggested during
   review;
 * make it more evident that DOMCTL_setvcpuaffinity has an out
   parameter, by calling it ecpumap_out, and improving the comment
   wrt that;
 * change the interface of xc_vcpu_[sg]etaffinity() so that they
   take the new parameters (flags and ecpumap_out), and fix the
   in-tree callers.
---
 tools/libxc/xc_domain.c             |   47 +++++++++++++++++++++--------------
 tools/libxc/xenctrl.h               |   44 ++++++++++++++++++++++++++++++++-
 tools/libxl/libxl.c                 |    7 ++++-
 tools/ocaml/libs/xc/xenctrl_stubs.c |    8 ++++--
 tools/python/xen/lowlevel/xc/xc.c   |    6 +++-
 5 files changed, 86 insertions(+), 26 deletions(-)

diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
index f9ae4bf..bddf4e0 100644
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -192,44 +192,52 @@ int xc_domain_node_getaffinity(xc_interface *xch,
 int xc_vcpu_setaffinity(xc_interface *xch,
                         uint32_t domid,
                         int vcpu,
-                        xc_cpumap_t cpumap)
+                        xc_cpumap_t cpumap,
+                        uint32_t flags,
+                        xc_cpumap_t ecpumap_out)
 {
     DECLARE_DOMCTL;
-    DECLARE_HYPERCALL_BUFFER(uint8_t, local);
+    DECLARE_HYPERCALL_BUFFER(uint8_t, cpumap_local);
+    DECLARE_HYPERCALL_BUFFER(uint8_t, ecpumap_local);
     int ret = -1;
     int cpusize;
 
     cpusize = xc_get_cpumap_size(xch);
-    if (!cpusize)
+    if ( !cpusize )
     {
         PERROR("Could not get number of cpus");
-        goto out;
+        return -1;
     }
 
-    local = xc_hypercall_buffer_alloc(xch, local, cpusize);
-    if ( local == NULL )
+    cpumap_local = xc_hypercall_buffer_alloc(xch, cpumap_local, cpusize);
+    ecpumap_local = xc_hypercall_buffer_alloc(xch, ecpumap_local, cpusize);
+    if ( cpumap_local == NULL || ecpumap_local == NULL )
     {
-        PERROR("Could not allocate memory for setvcpuaffinity domctl 
hypercall");
+        PERROR("Could not allocate hcall buffers for DOMCTL_setvcpuaffinity");
         goto out;
     }
 
     domctl.cmd = XEN_DOMCTL_setvcpuaffinity;
     domctl.domain = (domid_t)domid;
     domctl.u.vcpuaffinity.vcpu = vcpu;
-    /* Soft affinity is there, but not used anywhere for now, so... */
-    domctl.u.vcpuaffinity.flags = XEN_VCPUAFFINITY_HARD;
-
-    memcpy(local, cpumap, cpusize);
-
-    set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
+    domctl.u.vcpuaffinity.flags = flags;
 
+    memcpy(cpumap_local, cpumap, cpusize);
+    set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, cpumap_local);
     domctl.u.vcpuaffinity.cpumap.nr_bits = cpusize * 8;
 
+    set_xen_guest_handle(domctl.u.vcpuaffinity.eff_cpumap.bitmap,
+                         ecpumap_local);
+    domctl.u.vcpuaffinity.eff_cpumap.nr_bits = cpusize * 8;
+
     ret = do_domctl(xch, &domctl);
 
-    xc_hypercall_buffer_free(xch, local);
+    if ( ret == 0 && ecpumap_out != NULL )
+        memcpy(ecpumap_out, ecpumap_local, cpusize);
 
  out:
+    xc_hypercall_buffer_free(xch, cpumap_local);
+    xc_hypercall_buffer_free(xch, ecpumap_local);
     return ret;
 }
 
@@ -237,6 +245,7 @@ int xc_vcpu_setaffinity(xc_interface *xch,
 int xc_vcpu_getaffinity(xc_interface *xch,
                         uint32_t domid,
                         int vcpu,
+                        uint32_t flags,
                         xc_cpumap_t cpumap)
 {
     DECLARE_DOMCTL;
@@ -245,22 +254,23 @@ int xc_vcpu_getaffinity(xc_interface *xch,
     int cpusize;
 
     cpusize = xc_get_cpumap_size(xch);
-    if (!cpusize)
+    if ( !cpusize )
     {
         PERROR("Could not get number of cpus");
-        goto out;
+        return -1;
     }
 
     local = xc_hypercall_buffer_alloc(xch, local, cpusize);
-    if (local == NULL)
+    if ( local == NULL )
     {
         PERROR("Could not allocate memory for getvcpuaffinity domctl 
hypercall");
-        goto out;
+        return -1;
     }
 
     domctl.cmd = XEN_DOMCTL_getvcpuaffinity;
     domctl.domain = (domid_t)domid;
     domctl.u.vcpuaffinity.vcpu = vcpu;
+    domctl.u.vcpuaffinity.flags = flags;
 
     set_xen_guest_handle(domctl.u.vcpuaffinity.cpumap.bitmap, local);
     domctl.u.vcpuaffinity.cpumap.nr_bits = cpusize * 8;
@@ -270,7 +280,6 @@ int xc_vcpu_getaffinity(xc_interface *xch,
     memcpy(cpumap, local, cpusize);
 
     xc_hypercall_buffer_free(xch, local);
-out:
     return ret;
 }
 
diff --git a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h
index 4ac6b8a..a97ed67 100644
--- a/tools/libxc/xenctrl.h
+++ b/tools/libxc/xenctrl.h
@@ -579,13 +579,55 @@ int xc_domain_node_getaffinity(xc_interface *xch,
                                uint32_t domind,
                                xc_nodemap_t nodemap);
 
+/**
+ * This function specifies the CPU affinity for a vcpu.
+ *
+ * There are two kinds of affinity. Soft affinity is the pcpus on which a
+ * vcpu prefers to run; hard affinity is the pcpus on which it is allowed to run.
+ * If flags contains *only* XEN_VCPUAFFINITY_SOFT, it is the soft affinity
+ * that is set. If flags contains *only* XEN_VCPUAFFINITY_HARD, it is the
+ * hard affinity that is set. If flags contains *both*, both are set to the
+ * same value, provided in cpumap.
+ *
+ * The function also returns the effective affinity, via the ecpumap_out
+ * parameter. Effective affinity is the intersection of soft affinity, hard
+ * affinity and the set of cpus of the cpupool the domain belongs to.
+ * It is, basically, what the Xen scheduler will actually use. Reporting it
+ * back allows the caller to check whether that matches, or at least is
+ * good enough for, its purposes.
+ *
+ * @param xch a handle to an open hypervisor interface.
+ * @param domid the id of the domain to which the vcpu belongs
+ * @param vcpu the vcpu id within the domain
+ * @param cpumap the (hard, soft, both) new affinity map one wants to set
+ * @param flags what we want to set
+ * @param ecpumap_out where the effective affinity for the vcpu is returned
+ */
 int xc_vcpu_setaffinity(xc_interface *xch,
                         uint32_t domid,
                         int vcpu,
-                        xc_cpumap_t cpumap);
+                        xc_cpumap_t cpumap,
+                        uint32_t flags,
+                        xc_cpumap_t ecpumap_out);
+
+/**
+ * This function retrieves hard or soft CPU affinity (or their intersection)
+ * for a vcpu, depending on flags.
+ *
+ * Soft affinity is returned if *only* XEN_VCPUAFFINITY_SOFT is set in flags.
+ * Hard affinity is returned if *only* XEN_VCPUAFFINITY_HARD is set in flags.
+ * If both are set, what is returned is the intersection of the two.
+ *
+ * @param xch a handle to an open hypervisor interface.
+ * @param domid the id of the domain to which the vcpu belongs
+ * @param vcpu the vcpu id within the domain
+ * @param flags what we want to get
+ * @param cpumap is where the desired affinity is returned
+ */
 int xc_vcpu_getaffinity(xc_interface *xch,
                         uint32_t domid,
                         int vcpu,
+                        uint32_t flags,
                         xc_cpumap_t cpumap);
 
 
diff --git a/tools/libxl/libxl.c b/tools/libxl/libxl.c
index d3ab65e..d0db3f0 100644
--- a/tools/libxl/libxl.c
+++ b/tools/libxl/libxl.c
@@ -4208,7 +4208,9 @@ libxl_vcpuinfo *libxl_list_vcpu(libxl_ctx *ctx, uint32_t domid,
             LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting vcpu info");
             return NULL;
         }
-        if (xc_vcpu_getaffinity(ctx->xch, domid, *nb_vcpu, ptr->cpumap.map) == -1) {
+        if (xc_vcpu_getaffinity(ctx->xch, domid, *nb_vcpu,
+                                XEN_VCPUAFFINITY_HARD,
+                                ptr->cpumap.map) == -1) {
             LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "getting vcpu affinity");
             return NULL;
         }
@@ -4225,7 +4227,8 @@ libxl_vcpuinfo *libxl_list_vcpu(libxl_ctx *ctx, uint32_t domid,
 int libxl_set_vcpuaffinity(libxl_ctx *ctx, uint32_t domid, uint32_t vcpuid,
                            libxl_bitmap *cpumap)
 {
-    if (xc_vcpu_setaffinity(ctx->xch, domid, vcpuid, cpumap->map)) {
+    if (xc_vcpu_setaffinity(ctx->xch, domid, vcpuid, cpumap->map,
+                            XEN_VCPUAFFINITY_HARD, NULL)) {
         LIBXL__LOG_ERRNO(ctx, LIBXL__LOG_ERROR, "setting vcpu affinity");
         return ERROR_FAIL;
     }
diff --git a/tools/ocaml/libs/xc/xenctrl_stubs.c b/tools/ocaml/libs/xc/xenctrl_stubs.c
index f5cf0ed..30327d4 100644
--- a/tools/ocaml/libs/xc/xenctrl_stubs.c
+++ b/tools/ocaml/libs/xc/xenctrl_stubs.c
@@ -438,7 +438,9 @@ CAMLprim value stub_xc_vcpu_setaffinity(value xch, value domid,
                        c_cpumap[i/8] |= 1 << (i&7);
        }
        retval = xc_vcpu_setaffinity(_H(xch), _D(domid),
-                                    Int_val(vcpu), c_cpumap);
+                                    Int_val(vcpu), c_cpumap,
+                                    XEN_VCPUAFFINITY_HARD,
+                                    NULL);
        free(c_cpumap);
 
        if (retval < 0)
@@ -460,7 +462,9 @@ CAMLprim value stub_xc_vcpu_getaffinity(value xch, value domid,
                failwith_xc(_H(xch));
 
        retval = xc_vcpu_getaffinity(_H(xch), _D(domid),
-                                    Int_val(vcpu), c_cpumap);
+                                    Int_val(vcpu),
+                                    XEN_VCPUAFFINITY_HARD,
+                                    c_cpumap);
        if (retval < 0) {
                free(c_cpumap);
                failwith_xc(_H(xch));
diff --git a/tools/python/xen/lowlevel/xc/xc.c b/tools/python/xen/lowlevel/xc/xc.c
index 2625fc4..9348ce6 100644
--- a/tools/python/xen/lowlevel/xc/xc.c
+++ b/tools/python/xen/lowlevel/xc/xc.c
@@ -256,7 +256,8 @@ static PyObject *pyxc_vcpu_setaffinity(XcObject *self,
         }
     }
   
-    if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap) != 0 )
+    if ( xc_vcpu_setaffinity(self->xc_handle, dom, vcpu, cpumap,
+                             XEN_VCPUAFFINITY_HARD, NULL) != 0 )
     {
         free(cpumap);
         return pyxc_error_to_exception(self->xc_handle);
@@ -403,7 +404,8 @@ static PyObject *pyxc_vcpu_getinfo(XcObject *self,
     if(cpumap == NULL)
         return pyxc_error_to_exception(self->xc_handle);
 
-    rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu, cpumap);
+    rc = xc_vcpu_getaffinity(self->xc_handle, dom, vcpu,
+                             XEN_VCPUAFFINITY_HARD, cpumap);
     if ( rc < 0 )
     {
         free(cpumap);

