Re: [Xen-devel] [PATCH 2 of 2] Avoid vcpu migration of paused vcpus
>>> On 22.03.12 at 09:22, Juergen Gross <juergen.gross@xxxxxxxxxxxxxx> wrote:
>--- a/xen/common/schedule.c Thu Mar 22 09:21:44 2012 +0100
>+++ b/xen/common/schedule.c Thu Mar 22 09:21:47 2012 +0100
>@@ -525,6 +525,47 @@ void vcpu_force_reschedule(struct vcpu *
> }
> }
>
>+static int vcpu_chk_affinity(struct vcpu *v, unsigned int cpu)
>+{
>+ cpumask_t online_affinity;
>+ struct cpupool *c;
>+
>+ c = v->domain->cpupool;
>+ cpumask_and(&online_affinity, v->cpu_affinity, c->cpu_valid);
>+ if ( cpumask_empty(&online_affinity) &&
There's no need for a local cpumask_t variable here - please use
!cpumask_intersects(v->cpu_affinity, c->cpu_valid) instead.
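For illustration only (not part of the posted patch), the check would then
collapse to the following, and the online_affinity declaration above can go
away:

    if ( !cpumask_intersects(v->cpu_affinity, c->cpu_valid) &&
         cpumask_test_cpu(cpu, v->cpu_affinity) )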
>+ cpumask_test_cpu(cpu, v->cpu_affinity) )
>+ {
>+ printk("Breaking vcpu affinity for domain %d vcpu %d\n",
>+ v->domain->domain_id, v->vcpu_id);
>+ cpumask_setall(v->cpu_affinity);
>+ }
>+
>+ return ( !cpumask_test_cpu(cpu, c->cpu_valid) && (v->processor == cpu) );
Please drop the extra outermost parentheses.
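I.e. (sketch only):

    return !cpumask_test_cpu(cpu, c->cpu_valid) && (v->processor == cpu);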
>+}
>+
>+void vcpu_arouse(struct vcpu *v)
>+{
>+ unsigned long flags;
>+
>+ if ( atomic_read(&v->pause_count) ||
>+ atomic_read(&v->domain->pause_count) )
Is it not possible (or even more correct) to use vcpu_runnable() here?
>+ return;
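For reference, a sketch of the entry check using vcpu_runnable(); note that
it also folds in v->pause_flags, so the semantics would be slightly wider
than the explicit pause_count tests quoted above:

    if ( !vcpu_runnable(v) )
        return;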
>+
>+ vcpu_schedule_lock_irqsave(v, flags);
>+
>+ if ( unlikely(vcpu_chk_affinity(v, v->processor) && (v != current)) )
unlikely() is generally useful only around individual conditions (i.e.
not around && or || expressions).
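For example (the exact split being Juergen's call), the hint could be
confined to the affinity check alone, keeping the evaluation order:

    if ( unlikely(vcpu_chk_affinity(v, v->processor)) && (v != current) )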
>+ {
>+ set_bit(_VPF_migrating, &v->pause_flags);
>+ vcpu_schedule_unlock_irqrestore(v, flags);
>+ vcpu_migrate(v);
>+ return;
>+ }
>+
>+ vcpu_schedule_unlock_irqrestore(v, flags);
>+
>+ vcpu_wake(v);
>+}
>+
> /*
> * This function is used by cpu_hotplug code from stop_machine context
> * and from cpupools to switch schedulers on a cpu.
>@@ -534,7 +575,6 @@ int cpu_disable_scheduler(unsigned int c
> struct domain *d;
> struct vcpu *v;
> struct cpupool *c;
>- cpumask_t online_affinity;
> int ret = 0;
>
> c = per_cpu(cpupool, cpu);
>@@ -547,34 +587,33 @@ int cpu_disable_scheduler(unsigned int c
> {
> vcpu_schedule_lock_irq(v);
>
>- cpumask_and(&online_affinity, v->cpu_affinity, c->cpu_valid);
>- if ( cpumask_empty(&online_affinity) &&
>- cpumask_test_cpu(cpu, v->cpu_affinity) )
>+ if ( likely(!atomic_read(&v->pause_count) &&
>+ !atomic_read(&d->pause_count)) )
Same question as above regarding vcpu_runnable().
> {
>- printk("Breaking vcpu affinity for domain %d vcpu %d\n",
>- v->domain->domain_id, v->vcpu_id);
>- cpumask_setall(v->cpu_affinity);
>- }
>+ if ( vcpu_chk_affinity(v, cpu) )
>+ {
>+ set_bit(_VPF_migrating, &v->pause_flags);
>+ vcpu_schedule_unlock_irq(v);
>+ vcpu_sleep_nosync(v);
>+ vcpu_migrate(v);
>+ }
>+ else
>+ {
>+ vcpu_schedule_unlock_irq(v);
>+ }
Please drop the unnecessary braces here, as per the recently posted
coding style draft.
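I.e. (sketch):

        else
            vcpu_schedule_unlock_irq(v);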
Jan
>
>- if ( v->processor == cpu )
>- {
>- set_bit(_VPF_migrating, &v->pause_flags);
>- vcpu_schedule_unlock_irq(v);
>- vcpu_sleep_nosync(v);
>- vcpu_migrate(v);
>+ /*
>+ * A vcpu active in the hypervisor will not be migratable.
>+ * The caller should try again after releasing and reacquiring
>+ * all locks.
>+ */
>+ if ( v->processor == cpu )
>+ ret = -EAGAIN;
> }
> else
> {
> vcpu_schedule_unlock_irq(v);
> }
>-
>- /*
>- * A vcpu active in the hypervisor will not be migratable.
>- * The caller should try again after releasing and reacquiring
>- * all locks.
>- */
>- if ( v->processor == cpu )
>- ret = -EAGAIN;
> }
>
> domain_update_node_affinity(d);
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel