# HG changeset patch
# User Juergen Gross
# Date 1327311901 -3600
# Node ID cee17928d4eff5e7873f30276f424e16dca878ad
# Parent  eca719b621a1201528bfec25fb1786ec21c0c9d3
Reflect cpupool in numa node affinity

In order to prefer node-local memory for a domain, the NUMA node locality
info must be built according to the cpus belonging to the cpupool of the
domain.

Signed-off-by: juergen.gross@xxxxxxxxxxxxxx

diff -r eca719b621a1 -r cee17928d4ef xen/common/cpupool.c
--- a/xen/common/cpupool.c Sun Jan 22 10:20:03 2012 +0000
+++ b/xen/common/cpupool.c Mon Jan 23 10:45:01 2012 +0100
@@ -220,6 +220,7 @@ static int cpupool_assign_cpu_locked(str
 {
     int ret;
     struct cpupool *old;
+    struct domain *d;

     if ( (cpupool_moving_cpu == cpu) && (c != cpupool_cpu_moving) )
         return -EBUSY;
@@ -240,6 +241,14 @@ static int cpupool_assign_cpu_locked(str
         cpupool_cpu_moving = NULL;
     }
     cpumask_set_cpu(cpu, c->cpu_valid);
+
+    rcu_read_lock(&domlist_read_lock);
+    for_each_domain_in_cpupool(d, c)
+    {
+        domain_update_node_affinity(d);
+    }
+    rcu_read_unlock(&domlist_read_lock);
+
     return 0;
 }

diff -r eca719b621a1 -r cee17928d4ef xen/common/domain.c
--- a/xen/common/domain.c Sun Jan 22 10:20:03 2012 +0000
+++ b/xen/common/domain.c Mon Jan 23 10:45:01 2012 +0100
@@ -11,6 +11,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -365,15 +366,21 @@ void domain_update_node_affinity(struct
 void domain_update_node_affinity(struct domain *d)
 {
     cpumask_t cpumask;
+    cpumask_t online_affinity;
+    cpumask_t *online;
     nodemask_t nodemask = NODE_MASK_NONE;
     struct vcpu *v;
     unsigned int node;

+    online = (d->cpupool == NULL) ? &cpu_online_map : d->cpupool->cpu_valid;
     cpumask_clear(&cpumask);
     spin_lock(&d->node_affinity_lock);

     for_each_vcpu ( d, v )
-        cpumask_or(&cpumask, &cpumask, v->cpu_affinity);
+    {
+        cpumask_and(&online_affinity, v->cpu_affinity, online);
+        cpumask_or(&cpumask, &cpumask, &online_affinity);
+    }

     for_each_online_node ( node )
         if ( cpumask_intersects(&node_to_cpumask(node), &cpumask) )
diff -r eca719b621a1 -r cee17928d4ef xen/common/schedule.c
--- a/xen/common/schedule.c Sun Jan 22 10:20:03 2012 +0000
+++ b/xen/common/schedule.c Mon Jan 23 10:45:01 2012 +0100
@@ -282,11 +282,12 @@ int sched_move_domain(struct domain *d,
         SCHED_OP(VCPU2OP(v), insert_vcpu, v);
     }

-    domain_update_node_affinity(d);

     d->cpupool = c;
     SCHED_OP(DOM2OP(d), free_domdata, d->sched_priv);
     d->sched_priv = domdata;
+
+    domain_update_node_affinity(d);

     domain_unpause(d);

@@ -537,7 +538,6 @@ int cpu_disable_scheduler(unsigned int c
     struct cpupool *c;
     cpumask_t online_affinity;
     int ret = 0;
-    bool_t affinity_broken;

     c = per_cpu(cpupool, cpu);
     if ( c == NULL )
@@ -545,8 +545,6 @@ int cpu_disable_scheduler(unsigned int c

     for_each_domain_in_cpupool ( d, c )
     {
-        affinity_broken = 0;
-
         for_each_vcpu ( d, v )
         {
             vcpu_schedule_lock_irq(v);
@@ -558,7 +556,6 @@ int cpu_disable_scheduler(unsigned int c
                 printk("Breaking vcpu affinity for domain %d vcpu %d\n",
                        v->domain->domain_id, v->vcpu_id);
                 cpumask_setall(v->cpu_affinity);
-                affinity_broken = 1;
             }

             if ( v->processor == cpu )
@@ -582,8 +579,7 @@ int cpu_disable_scheduler(unsigned int c
                 ret = -EAGAIN;
         }

-        if ( affinity_broken )
-            domain_update_node_affinity(d);
+        domain_update_node_affinity(d);
     }

     return ret;
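
For readers less familiar with the Xen internals touched above, the following
standalone sketch (not Xen code; the bitmap layout, type and function names are
hypothetical) illustrates the core idea of the patched
domain_update_node_affinity(): each vCPU's CPU affinity is first masked with the
CPUs the domain may actually use, i.e. the cpupool's cpu_valid mask, or all
online CPUs when the domain is in no pool, and only the surviving CPUs
contribute to the domain's NUMA node affinity.

/*
 * Standalone sketch, not Xen code: CPU masks are modelled as 64-bit bitmaps
 * and the topology, types and helpers below are invented for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define NR_SKETCH_NODES 2

typedef uint64_t cpumask_sketch_t;   /* stand-in for Xen's cpumask_t */

/* Hypothetical topology: node 0 owns CPUs 0-3, node 1 owns CPUs 4-7. */
static const cpumask_sketch_t node_to_cpumask_sketch[NR_SKETCH_NODES] = {
    0x0fULL, 0xf0ULL
};

/*
 * Mirror of the patched logic: restrict each vCPU's affinity to the CPUs
 * the domain may actually use (its cpupool, or all online CPUs when it is
 * in no pool), OR the results together, and map the outcome to nodes.
 */
static unsigned int node_affinity_sketch(const cpumask_sketch_t *vcpu_affinity,
                                         unsigned int nr_vcpus,
                                         cpumask_sketch_t usable_cpus)
{
    cpumask_sketch_t cpumask = 0;
    unsigned int node, nodemask = 0, v;

    for ( v = 0; v < nr_vcpus; v++ )
        cpumask |= vcpu_affinity[v] & usable_cpus;     /* cpumask_and + cpumask_or */

    for ( node = 0; node < NR_SKETCH_NODES; node++ )
        if ( cpumask & node_to_cpumask_sketch[node] )  /* cpumask_intersects */
            nodemask |= 1u << node;

    return nodemask;
}

int main(void)
{
    /* Two vCPUs with "any CPU" affinity, but the pool only spans CPUs 0-3. */
    const cpumask_sketch_t vcpu_affinity[2] = { 0xffULL, 0xffULL };

    /* Prints 0x1: only node 0 is local to the pool's CPUs. */
    printf("node affinity mask: %#x\n",
           node_affinity_sketch(vcpu_affinity, 2, 0x0fULL));
    return 0;
}

Because CPUs outside the pool no longer pull their nodes into the affinity mask,
the patch also recomputes the node affinity whenever pool membership changes: in
cpupool_assign_cpu_locked() for every domain of the pool, and in
sched_move_domain() only after d->cpupool has been updated.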