
[Xen-changelog] [xen master] derive NUMA node affinity from hard and soft CPU affinity



commit bf508bd2fb83b51f8547d1969629e05b6b87d1c5
Author:     Dario Faggioli <dario.faggioli@xxxxxxxxxx>
AuthorDate: Mon Jun 16 12:13:03 2014 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Mon Jun 16 12:13:03 2014 +0200

    derive NUMA node affinity from hard and soft CPU affinity
    
    If a domain's NUMA node-affinity (which is what controls
    memory allocations) is provided by the user/toolstack, it
    is left untouched. However, if the user does not say
    anything, leaving it all to Xen, let's compute it in the
    following way:
    
     1. cpupool's cpus & hard-affinity & soft-affinity
     2. if (1) is empty: cpupool's cpus & hard-affinity
    
    This guarantees that memory is allocated from the narrowest
    possible set of NUMA nodes, and makes it relatively easy to
    set up NUMA-aware scheduling on top of soft affinity.
    
    Note that this 'narrowest set' is guaranteed to be non-empty.
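    
    As an illustration (not part of the patch; the host layout and
    mask values below are hypothetical), the two steps with plain
    bitmasks standing in for cpumask_t:
    
        #include <stdio.h>
    
        int main(void)
        {
            /* Hypothetical host: the cpupool has CPUs 0-3, the union
             * of the vcpus' hard affinity covers CPUs 2-5, and the
             * union of their soft affinity covers CPUs 4-5. */
            unsigned int pool = 0x0f, hard = 0x3c, soft = 0x30;
            unsigned int mask = pool & hard & soft;  /* step 1: empty */
    
            if ( !mask )
                mask = pool & hard;    /* step 2: 0x0c, i.e. CPUs 2-3 */
    
            printf("derive node-affinity from CPUs 0x%02x\n", mask);
            return 0;
        }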
    
    Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
    Acked-by: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
    Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
---
 xen/common/domain.c   |   61 +++++++++++++++++++++++++++++++-----------------
 xen/common/schedule.c |    4 ++-
 2 files changed, 42 insertions(+), 23 deletions(-)

diff --git a/xen/common/domain.c b/xen/common/domain.c
index e20d3bf..c3a576e 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -409,17 +409,17 @@ struct domain *domain_create(
 
 void domain_update_node_affinity(struct domain *d)
 {
-    cpumask_var_t cpumask;
-    cpumask_var_t online_affinity;
+    cpumask_var_t dom_cpumask, dom_cpumask_soft;
+    cpumask_t *dom_affinity;
     const cpumask_t *online;
     struct vcpu *v;
-    unsigned int node;
+    unsigned int cpu;
 
-    if ( !zalloc_cpumask_var(&cpumask) )
+    if ( !zalloc_cpumask_var(&dom_cpumask) )
         return;
-    if ( !alloc_cpumask_var(&online_affinity) )
+    if ( !zalloc_cpumask_var(&dom_cpumask_soft) )
     {
-        free_cpumask_var(cpumask);
+        free_cpumask_var(dom_cpumask);
         return;
     }
 
@@ -427,31 +427,48 @@ void domain_update_node_affinity(struct domain *d)
 
     spin_lock(&d->node_affinity_lock);
 
-    for_each_vcpu ( d, v )
-    {
-        cpumask_and(online_affinity, v->cpu_hard_affinity, online);
-        cpumask_or(cpumask, cpumask, online_affinity);
-    }
-
     /*
-     * If d->auto_node_affinity is true, the domain's node-affinity mask
-     * (d->node_affinity) is automaically computed from all the domain's
-     * vcpus' vcpu-affinity masks (the union of which we have just built
-     * above in cpumask). OTOH, if d->auto_node_affinity is false, we
-     * must leave the node-affinity of the domain alone.
+     * If d->auto_node_affinity is true, let's compute the domain's
+     * node-affinity and update d->node_affinity accordingly. If false,
+     * just leave d->node_affinity alone.
      */
     if ( d->auto_node_affinity )
     {
+        /*
+         * We want the narrowest possible set of pcpus (to get the narrowest
+         * possible set of nodes). What we need is the cpumask of where the
+         * domain can run (the union of the hard affinity of all its vcpus),
+         * and the full mask of where it would prefer to run (the union of
+         * the soft affinity of all its various vcpus). Let's build them.
+         */
+        for_each_vcpu ( d, v )
+        {
+            cpumask_or(dom_cpumask, dom_cpumask, v->cpu_hard_affinity);
+            cpumask_or(dom_cpumask_soft, dom_cpumask_soft,
+                       v->cpu_soft_affinity);
+        }
+        /* Filter out non-online cpus */
+        cpumask_and(dom_cpumask, dom_cpumask, online);
+        ASSERT(!cpumask_empty(dom_cpumask));
+        /* And compute the intersection between hard, online and soft */
+        cpumask_and(dom_cpumask_soft, dom_cpumask_soft, dom_cpumask);
+
+        /*
+         * If not empty, the intersection of hard, soft and online is the
+         * narrowest set we want. If empty, we fall back to hard&online.
+         */
+        dom_affinity = cpumask_empty(dom_cpumask_soft) ?
+                           dom_cpumask : dom_cpumask_soft;
+
         nodes_clear(d->node_affinity);
-        for_each_online_node ( node )
-            if ( cpumask_intersects(&node_to_cpumask(node), cpumask) )
-                node_set(node, d->node_affinity);
+        for_each_cpu ( cpu, dom_affinity )
+            node_set(cpu_to_node(cpu), d->node_affinity);
     }
 
     spin_unlock(&d->node_affinity_lock);
 
-    free_cpumask_var(online_affinity);
-    free_cpumask_var(cpumask);
+    free_cpumask_var(dom_cpumask_soft);
+    free_cpumask_var(dom_cpumask);
 }
 
 
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index d76a425..e57cd91 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -310,7 +310,9 @@ int sched_move_domain(struct domain *d, struct cpupool *c)
         SCHED_OP(old_ops, free_vdata, vcpudata);
     }
 
-    domain_update_node_affinity(d);
+    /* Do we have vcpus already? If not, no need to update node-affinity */
+    if ( d->vcpu )
+        domain_update_node_affinity(d);
 
     domain_unpause(d);
 
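
For illustration (not part of the patch): with no vcpus allocated yet,
the for_each_vcpu loop in domain_update_node_affinity() would leave
dom_cpumask empty, so the ASSERT added above would trip; that is what
the new 'if ( d->vcpu )' guard avoids. A minimal sketch of the failure
mode, with a plain unsigned mask standing in for cpumask_t:

    #include <assert.h>
    #include <stddef.h>

    /* Hypothetical stand-in for the hard-affinity union loop. */
    static unsigned int union_hard_affinity(const unsigned int *hard,
                                            int nr_vcpus)
    {
        unsigned int mask = 0;
        int i;

        for ( i = 0; i < nr_vcpus; i++ )
            mask |= hard[i];
        return mask;
    }

    int main(void)
    {
        unsigned int online = 0x0f;
        /* No vcpus yet: the union is empty, and so is the result. */
        unsigned int mask = union_hard_affinity(NULL, 0) & online;

        assert(mask != 0);  /* fires: this is what the guard avoids */
        return 0;
    }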
--
generated by git-patchbot for /home/xen/git/xen.git#master
