[Xen-devel] [PATCH v2] xen: sched: harmonize debug dump output among schedulers.
Information we currently print for idle pCPUs is rather useless. Credit2
already stopped showing that; do the same for Credit and RTDS.

Also, define a new CPU status dump hook, which is not defined by those
schedulers that already dump such info in other ways (e.g., Credit2,
which does that while dumping runqueue information). This also means
that, still in Credit2, we can keep the runqueue and pCPU info closer
together.

Signed-off-by: Dario Faggioli <dario.faggioli@xxxxxxxxxx>
Acked-by: Meng Xu <mengxu@xxxxxxxxxxxxx>
---
Cc: George Dunlap <george.dunlap@xxxxxxxxxxxxx>
Cc: Anshul Makkar <anshul.makkar@xxxxxxxxxx>
---
This is basically a rebase of "xen: sched: improve debug dump output."
on top of "xen: credit2: improve debug dump output." (i.e., commit
3af86727b8204).

Sorry again, George, for the mess... I was sure I hadn't sent the first
one out yet when I sent out what turned out to be the second version
(and, even worse, a slightly reworked one! :-( ).

I'm keeping Meng's ack, as the RTDS part is unchanged wrt the patch he
sent it against.
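One note for reviewers on the interface change: with the schedule.c hunk
below, dump_cpu_state becomes an optional hook, i.e., the generic code
now checks the pointer for NULL instead of calling it unconditionally.
Here is a minimal, standalone sketch of that pattern (plain C, not Xen
code: the toy_* names are made up purely for illustration):

#include <stdio.h>

struct toy_sched_ops {
    /* Optional: schedulers that dump pCPU info elsewhere leave this NULL. */
    void (*dump_cpu_state)(int cpu);
};

/* A scheduler using the hook prints its own per-CPU header now. */
static void toy_dump_pcpu(int cpu)
{
    printf("CPU[%02d]\n", cpu);
}

/* Mirrors the guard added to schedule_dump(). */
static void toy_schedule_dump(const struct toy_sched_ops *ops, int nr_cpus)
{
    int i;

    if ( ops->dump_cpu_state != NULL )
    {
        printf("CPUs info:\n");
        for ( i = 0; i < nr_cpus; i++ )
            ops->dump_cpu_state(i);
    }
}

int main(void)
{
    struct toy_sched_ops rtds_like    = { toy_dump_pcpu };
    struct toy_sched_ops credit2_like = { NULL };

    toy_schedule_dump(&rtds_like, 2);    /* "CPUs info:", CPU[00], CPU[01] */
    toy_schedule_dump(&credit2_like, 2); /* prints nothing at all */
    return 0;
}

The point being that a scheduler like Credit2, which from now on prints
the pCPU info while walking its own runqueues, just leaves the hook
undefined, and the generic layer does not even print the "CPUs info:"
banner for it.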
---
 xen/common/sched_credit.c  |    6 +++---
 xen/common/sched_credit2.c |   34 +++++++++++-----------------------
 xen/common/sched_rt.c      |    9 ++++++++-
 xen/common/schedule.c      |    8 ++++----
 4 files changed, 26 insertions(+), 31 deletions(-)

diff --git a/xen/common/sched_credit.c b/xen/common/sched_credit.c
index ad20819..7c0ff47 100644
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -1988,13 +1988,13 @@ csched_dump_pcpu(const struct scheduler *ops, int cpu)
     runq = &spc->runq;
 
     cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_sibling_mask, cpu));
-    printk(" sort=%d, sibling=%s, ", spc->runq_sort_last, cpustr);
+    printk("CPU[%02d] sort=%d, sibling=%s, ", cpu, spc->runq_sort_last, cpustr);
     cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_core_mask, cpu));
     printk("core=%s\n", cpustr);
 
-    /* current VCPU */
+    /* current VCPU (nothing to say if that's the idle vcpu). */
     svc = CSCHED_VCPU(curr_on_cpu(cpu));
-    if ( svc )
+    if ( svc && !is_idle_vcpu(svc->vcpu) )
     {
         printk("\trun: ");
         csched_dump_vcpu(svc);
diff --git a/xen/common/sched_credit2.c b/xen/common/sched_credit2.c
index 93c6d32..9f5a190 100644
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -2627,28 +2627,15 @@ csched2_dump_vcpu(struct csched2_private *prv, struct csched2_vcpu *svc)
     printk("\n");
 }
 
-static void
-csched2_dump_pcpu(const struct scheduler *ops, int cpu)
+static inline void
+dump_pcpu(const struct scheduler *ops, int cpu)
 {
     struct csched2_private *prv = CSCHED2_PRIV(ops);
     struct csched2_vcpu *svc;
-    unsigned long flags;
-    spinlock_t *lock;
 #define cpustr keyhandler_scratch
 
-    /*
-     * We need both locks:
-     * - we print current, so we need the runqueue lock for this
-     *   cpu (the one of the runqueue this cpu is associated to);
-     * - csched2_dump_vcpu() wants to access domains' weights,
-     *   which are protected by the private scheduler lock.
-     */
-    read_lock_irqsave(&prv->lock, flags);
-    lock = per_cpu(schedule_data, cpu).schedule_lock;
-    spin_lock(lock);
-
     cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_sibling_mask, cpu));
-    printk(" runq=%d, sibling=%s, ", c2r(ops, cpu), cpustr);
+    printk("CPU[%02d] runq=%d, sibling=%s, ", cpu, c2r(ops, cpu), cpustr);
     cpumask_scnprintf(cpustr, sizeof(cpustr), per_cpu(cpu_core_mask, cpu));
     printk("core=%s\n", cpustr);
 
@@ -2659,9 +2646,6 @@ csched2_dump_pcpu(const struct scheduler *ops, int cpu)
         printk("\trun: ");
         csched2_dump_vcpu(prv, svc);
     }
-
-    spin_unlock(lock);
-    read_unlock_irqrestore(&prv->lock, flags);
 #undef cpustr
 }
 
@@ -2671,7 +2655,7 @@ csched2_dump(const struct scheduler *ops)
     struct list_head *iter_sdom;
     struct csched2_private *prv = CSCHED2_PRIV(ops);
     unsigned long flags;
-    int i, loop;
+    unsigned int i, j, loop;
 #define cpustr keyhandler_scratch
 
     /*
@@ -2741,7 +2725,6 @@ csched2_dump(const struct scheduler *ops)
         }
     }
 
-    printk("Runqueue info:\n");
     for_each_cpu(i, &prv->active_queues)
     {
         struct csched2_runqueue_data *rqd = prv->rqd + i;
@@ -2750,7 +2733,13 @@ csched2_dump(const struct scheduler *ops)
 
         /* We need the lock to scan the runqueue. */
         spin_lock(&rqd->lock);
-        printk("runqueue %d:\n", i);
+
+        printk("Runqueue %d:\n", i);
+
+        for_each_cpu(j, &rqd->active)
+            dump_pcpu(ops, j);
+
+        printk("RUNQ:\n");
         list_for_each( iter, runq )
         {
             struct csched2_vcpu *svc = __runq_elem(iter);
@@ -3108,7 +3097,6 @@ static const struct scheduler sched_credit2_def = {
     .do_schedule    = csched2_schedule,
     .context_saved  = csched2_context_saved,
 
-    .dump_cpu_state = csched2_dump_pcpu,
     .dump_settings  = csched2_dump,
     .init           = csched2_init,
     .deinit         = csched2_deinit,
diff --git a/xen/common/sched_rt.c b/xen/common/sched_rt.c
index 24b4b22..f2d979c 100644
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -320,10 +320,17 @@ static void
 rt_dump_pcpu(const struct scheduler *ops, int cpu)
 {
     struct rt_private *prv = rt_priv(ops);
+    struct rt_vcpu *svc;
     unsigned long flags;
 
     spin_lock_irqsave(&prv->lock, flags);
-    rt_dump_vcpu(ops, rt_vcpu(curr_on_cpu(cpu)));
+    printk("CPU[%02d]\n", cpu);
+    /* current VCPU (nothing to say if that's the idle vcpu). */
+    svc = rt_vcpu(curr_on_cpu(cpu));
+    if ( svc && !is_idle_vcpu(svc->vcpu) )
+    {
+        rt_dump_vcpu(ops, svc);
+    }
     spin_unlock_irqrestore(&prv->lock, flags);
 }
 
diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index ed77990..e4320f3 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -1844,11 +1844,11 @@ void schedule_dump(struct cpupool *c)
         cpus = &cpupool_free_cpus;
     }
 
-    printk("CPUs info:\n");
-    for_each_cpu (i, cpus)
+    if ( sched->dump_cpu_state != NULL )
     {
-        printk("CPU[%02d] ", i);
-        SCHED_OP(sched, dump_cpu_state, i);
+        printk("CPUs info:\n");
+        for_each_cpu (i, cpus)
+            SCHED_OP(sched, dump_cpu_state, i);
     }
 }
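FTR, the easiest way I know of to eyeball the new output is the
run-queue dump debug-key, which ends up right in schedule_dump():
something like `xl debug-keys r' followed by `xl dmesg' (assuming 'r'
is still the runq-dump key on your tree) should show the harmonized
format.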