[Xen-devel] [PATCH 3/4] xen: sched: improve checking soft-affinity
Whether or not a vCPU has an effective soft-affinity,
i.e., one with the power of actually affecting the
scheduling of the vCPU itself, is checked in a helper
function called has_soft_affinity().

That function takes a custom cpumask as its second
parameter, for better flexibility, but the mask it is
passed differs from the vCPU's hard-affinity in only
one case. Getting rid of that parameter not only
simplifies the function, but also enables optimizing
the soft-affinity check (which will happen in a
subsequent commit).

This commit, therefore, does that. The change is
mostly mechanical, the only exception being
_csched_cpu_pick() (in the Credit1 code).
Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
---
Cc: George Dunlap <george.dunlap@xxxxxxxxxx>
Cc: Anshul Makkar <anshulmakkar@xxxxxxxxx>
---
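For reviewers' convenience, here is a condensed before/after sketch of the
helper, as it results from the xen/include/xen/sched-if.h hunk below (this
note is not part of the patch itself):

    /* Before: callers pass a mask, almost always v->cpu_hard_affinity. */
    static inline int has_soft_affinity(const struct vcpu *v,
                                        const cpumask_t *mask)
    {
        return !cpumask_subset(cpupool_domain_cpumask(v->domain),
                               v->cpu_soft_affinity) &&
               !cpumask_subset(v->cpu_hard_affinity, v->cpu_soft_affinity) &&
               cpumask_intersects(v->cpu_soft_affinity, mask);
    }

    /* After: the mask parameter is gone; hard-affinity is used directly. */
    static inline int has_soft_affinity(const struct vcpu *v)
    {
        return !cpumask_subset(cpupool_domain_cpumask(v->domain),
                               v->cpu_soft_affinity) &&
               !cpumask_subset(v->cpu_hard_affinity, v->cpu_soft_affinity) &&
               cpumask_intersects(v->cpu_soft_affinity, v->cpu_hard_affinity);
    }

Most call sites therefore just drop the second argument; the one caller that
passed a mask other than the hard-affinity, _csched_cpu_pick(), now filters
the scratch cpumask against the online cpus and checks cpumask_empty()
explicitly before the soft-affinity balance step.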
xen/common/sched_credit.c | 79 +++++++++++++++++++++-----------------------
xen/common/sched_credit2.c | 10 ++----
xen/common/sched_null.c | 8 ++--
xen/include/xen/sched-if.h | 8 ++--
4 files changed, 48 insertions(+), 57 deletions(-)
diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index 3efbfc8..35d0c98 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -410,8 +410,7 @@ static inline void __runq_tickle(struct csched_vcpu *new)
int new_idlers_empty;
if ( balance_step == BALANCE_SOFT_AFFINITY
- && !has_soft_affinity(new->vcpu,
- new->vcpu->cpu_hard_affinity) )
+ && !has_soft_affinity(new->vcpu) )
continue;
/* Are there idlers suitable for new (for this balance step)? */
@@ -743,50 +742,42 @@ __csched_vcpu_is_migrateable(struct vcpu *vc, int dest_cpu, cpumask_t *mask)
static int
_csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc, bool_t commit)
{
- cpumask_t cpus;
cpumask_t idlers;
- cpumask_t *online;
+ cpumask_t *online = cpupool_domain_cpumask(vc->domain);
struct csched_pcpu *spc = NULL;
int cpu = vc->processor;
int balance_step;
- /* Store in cpus the mask of online cpus on which the domain can run */
- online = cpupool_domain_cpumask(vc->domain);
- cpumask_and(&cpus, vc->cpu_hard_affinity, online);
-
for_each_affinity_balance_step( balance_step )
{
+ affinity_balance_cpumask(vc, balance_step, cpumask_scratch_cpu(cpu));
+        cpumask_and(cpumask_scratch_cpu(cpu), online,
+                    cpumask_scratch_cpu(cpu));
/*
* We want to pick up a pcpu among the ones that are online and
- * can accommodate vc, which is basically what we computed above
- * and stored in cpus. As far as hard affinity is concerned,
- * there always will be at least one of these pcpus, hence cpus
- * is never empty and the calls to cpumask_cycle() and
- * cpumask_test_cpu() below are ok.
+ * can accommodate vc. As far as hard affinity is concerned, there
+ * always will be at least one of these pcpus in the scratch cpumask,
+ * hence, the calls to cpumask_cycle() and cpumask_test_cpu() below
+ * are ok.
*
- * On the other hand, when considering soft affinity too, it
- * is possible for the mask to become empty (for instance, if the
- * domain has been put in a cpupool that does not contain any of the
- * pcpus in its soft affinity), which would result in the ASSERT()-s
- * inside cpumask_*() operations triggering (in debug builds).
+ * On the other hand, when considering soft affinity, it is possible
+ * that the mask is empty (for instance, if the domain has been put
+ * in a cpupool that does not contain any of the pcpus in its soft
+ * affinity), which would result in the ASSERT()-s inside cpumask_*()
+ * operations triggering (in debug builds).
*
- * Therefore, in this case, we filter the soft affinity mask against
- * cpus and, if the result is empty, we just skip the soft affinity
+ * Therefore, if that is the case, we just skip the soft affinity
* balancing step all together.
*/
- if ( balance_step == BALANCE_SOFT_AFFINITY
- && !has_soft_affinity(vc, &cpus) )
+ if ( balance_step == BALANCE_SOFT_AFFINITY &&
+ (!has_soft_affinity(vc) ||
+ cpumask_empty(cpumask_scratch_cpu(cpu))) )
continue;
- /* Pick an online CPU from the proper affinity mask */
- affinity_balance_cpumask(vc, balance_step, &cpus);
- cpumask_and(&cpus, &cpus, online);
-
/* If present, prefer vc's current processor */
- cpu = cpumask_test_cpu(vc->processor, &cpus)
+ cpu = cpumask_test_cpu(vc->processor, cpumask_scratch_cpu(cpu))
? vc->processor
- : cpumask_cycle(vc->processor, &cpus);
- ASSERT(cpumask_test_cpu(cpu, &cpus));
+ : cpumask_cycle(vc->processor, cpumask_scratch_cpu(cpu));
+ ASSERT(cpumask_test_cpu(cpu, cpumask_scratch_cpu(cpu)));
/*
* Try to find an idle processor within the above constraints.
@@ -807,7 +798,8 @@ _csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc, bool_t commit)
cpumask_and(&idlers, &cpu_online_map, CSCHED_PRIV(ops)->idlers);
if ( vc->processor == cpu && is_runq_idle(cpu) )
__cpumask_set_cpu(cpu, &idlers);
- cpumask_and(&cpus, &cpus, &idlers);
+        cpumask_and(cpumask_scratch_cpu(cpu), &idlers,
+                    cpumask_scratch_cpu(cpu));
/*
* It is important that cpu points to an idle processor, if a suitable
@@ -821,18 +813,19 @@ _csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc, bool_t commit)
* Notice that cpumask_test_cpu() is quicker than cpumask_empty(), so
* we check for it first.
*/
- if ( !cpumask_test_cpu(cpu, &cpus) && !cpumask_empty(&cpus) )
- cpu = cpumask_cycle(cpu, &cpus);
- __cpumask_clear_cpu(cpu, &cpus);
+ if ( !cpumask_test_cpu(cpu, cpumask_scratch_cpu(cpu)) &&
+ !cpumask_empty(cpumask_scratch_cpu(cpu)) )
+ cpu = cpumask_cycle(cpu, cpumask_scratch_cpu(cpu));
+ __cpumask_clear_cpu(cpu, cpumask_scratch_cpu(cpu));
- while ( !cpumask_empty(&cpus) )
+ while ( !cpumask_empty(cpumask_scratch_cpu(cpu)) )
{
cpumask_t cpu_idlers;
cpumask_t nxt_idlers;
int nxt, weight_cpu, weight_nxt;
int migrate_factor;
- nxt = cpumask_cycle(cpu, &cpus);
+ nxt = cpumask_cycle(cpu, cpumask_scratch_cpu(cpu));
if ( cpumask_test_cpu(cpu, per_cpu(cpu_core_mask, nxt)) )
{
@@ -862,14 +855,19 @@ _csched_cpu_pick(const struct scheduler *ops, struct vcpu *vc, bool_t commit)
weight_cpu > weight_nxt :
weight_cpu * migrate_factor < weight_nxt )
{
- cpumask_and(&nxt_idlers, &cpus, &nxt_idlers);
+                cpumask_and(&nxt_idlers, &nxt_idlers,
+                            cpumask_scratch_cpu(cpu));
spc = CSCHED_PCPU(nxt);
cpu = cpumask_cycle(spc->idle_bias, &nxt_idlers);
- cpumask_andnot(&cpus, &cpus, per_cpu(cpu_sibling_mask, cpu));
+ cpumask_andnot(cpumask_scratch_cpu(cpu),
+ cpumask_scratch_cpu(cpu),
+ per_cpu(cpu_sibling_mask, cpu));
}
else
{
- cpumask_andnot(&cpus, &cpus, &nxt_idlers);
+ cpumask_andnot(cpumask_scratch_cpu(cpu),
+ cpumask_scratch_cpu(cpu),
+ &nxt_idlers);
}
}
@@ -1687,9 +1685,8 @@ csched_runq_steal(int peer_cpu, int cpu, int pri, int balance_step)
* vCPUs with useful soft affinities in some sort of bitmap
* or counter.
*/
- if ( vc->is_running ||
- (balance_step == BALANCE_SOFT_AFFINITY
- && !has_soft_affinity(vc, vc->cpu_hard_affinity)) )
+ if ( vc->is_running || (balance_step == BALANCE_SOFT_AFFINITY &&
+ !has_soft_affinity(vc)) )
continue;
affinity_balance_cpumask(vc, balance_step, cpumask_scratch);
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index e1985fb..b5611c9 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -698,8 +698,7 @@ static int get_fallback_cpu(struct csched2_vcpu *svc)
{
int cpu = v->processor;
- if ( bs == BALANCE_SOFT_AFFINITY &&
- !has_soft_affinity(v, v->cpu_hard_affinity) )
+ if ( bs == BALANCE_SOFT_AFFINITY && !has_soft_affinity(v) )
continue;
affinity_balance_cpumask(v, bs, cpumask_scratch_cpu(cpu));
@@ -1482,8 +1481,7 @@ runq_tickle(const struct scheduler *ops, struct csched2_vcpu *new, s_time_t now)
for_each_affinity_balance_step( bs )
{
/* Just skip first step, if we don't have a soft affinity */
- if ( bs == BALANCE_SOFT_AFFINITY &&
- !has_soft_affinity(new->vcpu, new->vcpu->cpu_hard_affinity) )
+ if ( bs == BALANCE_SOFT_AFFINITY && !has_soft_affinity(new->vcpu) )
continue;
affinity_balance_cpumask(new->vcpu, bs, cpumask_scratch_cpu(cpu));
@@ -2283,7 +2281,7 @@ csched2_cpu_pick(const struct scheduler *ops, struct vcpu *vc)
*
* Find both runqueues in one pass.
*/
- has_soft = has_soft_affinity(vc, vc->cpu_hard_affinity);
+ has_soft = has_soft_affinity(vc);
for_each_cpu(i, &prv->active_queues)
{
struct csched2_runqueue_data *rqd;
@@ -3190,7 +3188,7 @@ runq_candidate(struct csched2_runqueue_data *rqd,
}
/* If scurr has a soft-affinity, let's check whether cpu is part of it */
- if ( has_soft_affinity(scurr->vcpu, scurr->vcpu->cpu_hard_affinity) )
+ if ( has_soft_affinity(scurr->vcpu) )
{
affinity_balance_cpumask(scurr->vcpu, BALANCE_SOFT_AFFINITY,
cpumask_scratch);
diff --git a/xen/common/sched_null.c b/xen/common/sched_null.c
index b4a24ba..4115d4a 100644
--- a/xen/common/sched_null.c
+++ b/xen/common/sched_null.c
@@ -299,8 +299,7 @@ static unsigned int pick_cpu(struct null_private *prv, struct vcpu *v)
for_each_affinity_balance_step( bs )
{
- if ( bs == BALANCE_SOFT_AFFINITY &&
- !has_soft_affinity(v, v->cpu_hard_affinity) )
+ if ( bs == BALANCE_SOFT_AFFINITY && !has_soft_affinity(v) )
continue;
affinity_balance_cpumask(v, bs, cpumask_scratch_cpu(cpu));
@@ -512,8 +511,7 @@ static void _vcpu_remove(struct null_private *prv, struct vcpu *v)
{
list_for_each_entry( wvc, &prv->waitq, waitq_elem )
{
- if ( bs == BALANCE_SOFT_AFFINITY &&
- !has_soft_affinity(wvc->vcpu, wvc->vcpu->cpu_hard_affinity) )
+ if ( bs == BALANCE_SOFT_AFFINITY && !has_soft_affinity(wvc->vcpu) )
continue;
if ( vcpu_check_affinity(wvc->vcpu, cpu, bs) )
@@ -782,7 +780,7 @@ static struct task_slice null_schedule(const struct scheduler *ops,
list_for_each_entry( wvc, &prv->waitq, waitq_elem )
{
if ( bs == BALANCE_SOFT_AFFINITY &&
-                 !has_soft_affinity(wvc->vcpu, wvc->vcpu->cpu_hard_affinity) )
+ !has_soft_affinity(wvc->vcpu) )
continue;
if ( vcpu_check_affinity(wvc->vcpu, cpu, bs) )
diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index 982c780..417789a 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -243,16 +243,14 @@ static inline cpumask_t* cpupool_domain_cpumask(struct domain *d)
* Soft affinity only needs to be considered if:
* * The cpus in the cpupool are not a subset of soft affinity
* * The hard affinity is not a subset of soft affinity
- * * There is an overlap between the soft affinity and the mask which is
- * currently being considered.
+ * * There is an overlap between the soft and hard affinity masks
*/
-static inline int has_soft_affinity(const struct vcpu *v,
-                                    const cpumask_t *mask)
+static inline int has_soft_affinity(const struct vcpu *v)
{
return !cpumask_subset(cpupool_domain_cpumask(v->domain),
v->cpu_soft_affinity) &&
!cpumask_subset(v->cpu_hard_affinity, v->cpu_soft_affinity) &&
- cpumask_intersects(v->cpu_soft_affinity, mask);
+ cpumask_intersects(v->cpu_soft_affinity, v->cpu_hard_affinity);
}
/*
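For quick reference, the call pattern that the schedulers touched here end
up with (condensed from the sched_null.c and credit2 hunks above; the actual
pcpu selection is omitted) is:

    for_each_affinity_balance_step( bs )
    {
        /* Skip the soft-affinity step when it cannot make a difference. */
        if ( bs == BALANCE_SOFT_AFFINITY && !has_soft_affinity(v) )
            continue;

        affinity_balance_cpumask(v, bs, cpumask_scratch_cpu(cpu));
        /* ... pick a pcpu from cpumask_scratch_cpu(cpu) ... */
    }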
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel