[Xen-devel] [PATCH v3 3/4] sched: credit2: indent code sections to make review of patch 4/4 easier
Functions runq_tickle and choose_cpu both have code sections that get turned
into loops by patch 4 of this v3 series (soft affinity). Do the indenting here
to make the patch 4 diff easier to review; an illustrative sketch of the loop
structure that patch is expected to introduce follows the diffstat below. This
patch makes no change other than adding one four-space indent to each affected
line.
Signed-off-by: Justin T. Weaver <jtweaver@xxxxxxxxxx>
---
Changes in v3: First introduced in patch series version 3
---
xen/common/sched_credit2.c | 152 ++++++++++++++++++++++----------------------
1 file changed, 76 insertions(+), 76 deletions(-)
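
For orientation while reviewing, here is a small standalone C sketch of the
kind of two-pass affinity loop that a soft-affinity change typically wraps
around CPU-picking blocks like the ones indented below. It is illustrative
only, not code from patch 4/4: plain uint64_t bitmaps stand in for Xen's
cpumask_t, and the names pick_cpu, first_cpu and BALANCE_*_AFFINITY are
hypothetical.

/*
 * Standalone illustration only -- NOT code from patch 4/4.  It sketches
 * the two-pass affinity pattern that a soft affinity change typically
 * wraps around a CPU-picking block like the ones indented by this patch:
 * look for a CPU in the soft affinity mask first, and fall back to the
 * hard affinity mask if that pass finds nothing.  Plain uint64_t bitmaps
 * stand in for Xen's cpumask_t; all names here are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

enum balance_step { BALANCE_SOFT_AFFINITY, BALANCE_HARD_AFFINITY };

/* Index of the lowest set bit, or -1 if the mask is empty. */
static int first_cpu(uint64_t mask)
{
    for ( int i = 0; i < 64; i++ )
        if ( mask & (UINT64_C(1) << i) )
            return i;
    return -1;
}

/* Pick an idle CPU, preferring soft affinity over hard affinity. */
static int pick_cpu(uint64_t idle, uint64_t soft, uint64_t hard)
{
    for ( int step = BALANCE_SOFT_AFFINITY;
          step <= BALANCE_HARD_AFFINITY; step++ )
    {
        /* Soft pass: restrict to soft & hard; hard pass: hard only. */
        uint64_t allowed = (step == BALANCE_SOFT_AFFINITY) ? (soft & hard)
                                                           : hard;
        int cpu = first_cpu(idle & allowed);

        if ( cpu >= 0 )
            return cpu;          /* found one on this pass */
    }

    return -1;                   /* no suitable CPU on either pass */
}

int main(void)
{
    /* CPUs 2 and 5 idle; soft affinity {0,1}; hard affinity {0..5}. */
    printf("picked cpu %d\n", pick_cpu(0x24, 0x03, 0x3f)); /* prints 2 */
    return 0;
}

The real code would of course operate on cpumask_t under the runqueue lock;
the sketch only shows the control-flow shape (soft-affinity pass first,
hard-affinity fallback) that the extra indentation level prepares for.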
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index af716e4..bbcfbf2 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -534,58 +534,58 @@ runq_tickle(const struct scheduler *ops, unsigned int cpu, struct csched2_vcpu *
goto tickle;
}
- /* Get a mask of idle, but not tickled, that new is allowed to run on. */
- cpumask_andnot(&mask, &rqd->idle, &rqd->tickled);
- cpumask_and(&mask, &mask, new->vcpu->cpu_hard_affinity);
+ /* Get a mask of idle, but not tickled, that new is allowed to run on. */
+ cpumask_andnot(&mask, &rqd->idle, &rqd->tickled);
+ cpumask_and(&mask, &mask, new->vcpu->cpu_hard_affinity);
- /* If it's not empty, choose one */
- i = cpumask_cycle(cpu, &mask);
- if ( i < nr_cpu_ids )
- {
- ipid = i;
- goto tickle;
- }
+ /* If it's not empty, choose one */
+ i = cpumask_cycle(cpu, &mask);
+ if ( i < nr_cpu_ids )
+ {
+ ipid = i;
+ goto tickle;
+ }
/* Otherwise, look for the non-idle cpu with the lowest credit,
* skipping cpus which have been tickled but not scheduled yet,
* that new is allowed to run on. */
- cpumask_andnot(&mask, &rqd->active, &rqd->idle);
- cpumask_andnot(&mask, &mask, &rqd->tickled);
- cpumask_and(&mask, &mask, new->vcpu->cpu_hard_affinity);
+ cpumask_andnot(&mask, &rqd->active, &rqd->idle);
+ cpumask_andnot(&mask, &mask, &rqd->tickled);
+ cpumask_and(&mask, &mask, new->vcpu->cpu_hard_affinity);
- for_each_cpu(i, &mask)
- {
- struct csched2_vcpu * cur;
+ for_each_cpu(i, &mask)
+ {
+ struct csched2_vcpu * cur;
- /* Already looked at this one above */
- if ( i == cpu )
- continue;
+ /* Already looked at this one above */
+ if ( i == cpu )
+ continue;
- cur = CSCHED2_VCPU(curr_on_cpu(i));
+ cur = CSCHED2_VCPU(curr_on_cpu(i));
- BUG_ON(is_idle_vcpu(cur->vcpu));
+ BUG_ON(is_idle_vcpu(cur->vcpu));
- /* Update credits for current to see if we want to preempt */
- burn_credits(rqd, cur, now);
+ /* Update credits for current to see if we want to preempt */
+ burn_credits(rqd, cur, now);
- if ( cur->credit < lowest )
- {
- ipid = i;
- lowest = cur->credit;
- }
+ if ( cur->credit < lowest )
+ {
+ ipid = i;
+ lowest = cur->credit;
+ }
- /* TRACE */ {
- struct {
- unsigned dom:16,vcpu:16;
- unsigned credit;
- } d;
- d.dom = cur->vcpu->domain->domain_id;
- d.vcpu = cur->vcpu->vcpu_id;
- d.credit = cur->credit;
- trace_var(TRC_CSCHED2_TICKLE_CHECK, 1,
- sizeof(d),
- (unsigned char *)&d);
- }
+ /* TRACE */ {
+ struct {
+ unsigned dom:16,vcpu:16;
+ unsigned credit;
+ } d;
+ d.dom = cur->vcpu->domain->domain_id;
+ d.vcpu = cur->vcpu->vcpu_id;
+ d.credit = cur->credit;
+ trace_var(TRC_CSCHED2_TICKLE_CHECK, 1,
+ sizeof(d),
+ (unsigned char *)&d);
+ }
}
/* Only switch to another processor if the credit difference is greater
@@ -1144,45 +1144,45 @@ choose_cpu(const struct scheduler *ops, struct vcpu *vc)
min_avgload = MAX_LOAD;
/* Find the runqueue with the lowest instantaneous load */
- for_each_cpu(i, &prv->active_queues)
- {
- struct csched2_runqueue_data *rqd;
- s_time_t rqd_avgload = MAX_LOAD;
-
- rqd = prv->rqd + i;
-
- /* If checking a different runqueue, grab the lock,
- * check hard affinity, read the avg, and then release the lock.
- *
- * If on our own runqueue, don't grab or release the lock;
- * but subtract our own load from the runqueue load to simulate
- * impartiality.
- *
- * svc's hard affinity may have changed; this function is the
- * credit 2 scheduler's first opportunity to react to the change,
- * so it is possible here that svc does not have hard affinity
- * with any of the pcpus of svc's currently assigned run queue.
- */
- if ( rqd == svc->rqd )
+ for_each_cpu(i, &prv->active_queues)
{
- if ( cpumask_intersects(vc->cpu_hard_affinity, &rqd->active) )
- rqd_avgload = rqd->b_avgload - svc->avgload;
- }
- else if ( spin_trylock(&rqd->lock) )
- {
- if ( cpumask_intersects(vc->cpu_hard_affinity, &rqd->active) )
- rqd_avgload = rqd->b_avgload;
+ struct csched2_runqueue_data *rqd;
+ s_time_t rqd_avgload = MAX_LOAD;
+
+ rqd = prv->rqd + i;
+
+ /* If checking a different runqueue, grab the lock,
+ * check hard affinity, read the avg, and then release the lock.
+ *
+ * If on our own runqueue, don't grab or release the lock;
+ * but subtract our own load from the runqueue load to simulate
+ * impartiality.
+ *
+ * svc's hard affinity may have changed; this function is the
+ * credit 2 scheduler's first opportunity to react to the change,
+ * so it is possible here that svc does not have hard affinity
+ * with any of the pcpus of svc's currently assigned run queue.
+ */
+ if ( rqd == svc->rqd )
+ {
+ if ( cpumask_intersects(vc->cpu_hard_affinity, &rqd->active) )
+ rqd_avgload = rqd->b_avgload - svc->avgload;
+ }
+ else if ( spin_trylock(&rqd->lock) )
+ {
+ if ( cpumask_intersects(vc->cpu_hard_affinity, &rqd->active) )
+ rqd_avgload = rqd->b_avgload;
- spin_unlock(&rqd->lock);
- }
- else
- continue;
+ spin_unlock(&rqd->lock);
+ }
+ else
+ continue;
- if ( rqd_avgload < min_avgload )
- {
- min_avgload = rqd_avgload;
- min_rqi=i;
- }
+ if ( rqd_avgload < min_avgload )
+ {
+ min_avgload = rqd_avgload;
+ min_rqi=i;
+ }
}
/* We didn't find anyone (most likely because of spinlock contention). */
--
1.7.10.4
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel