[Xen-devel] [PATCH v2 36/48] xen/sched: make vcpu_wake() and vcpu_sleep() core scheduling aware
vcpu_wake() and vcpu_sleep() need to be made core scheduling aware: they
might need to switch a single vcpu of an already scheduled unit between
running and not running.

Especially when vcpu_sleep() for a vcpu is being called by a vcpu of the
same scheduling unit, special care must be taken in order to avoid a
deadlock: the vcpu to be put to sleep must be forced through a context
switch without doing so for the calling vcpu. For this purpose add a vcpu
flag handled in sched_slave() and in sched_wait_rendezvous_in() allowing a
vcpu of the currently running unit to switch state at a higher priority
than a normal schedule event.

Use the same mechanism when waking up a vcpu of a currently active unit.

While at it make vcpu_sleep_nosync_locked() static as it is used in
schedule.c only.

Signed-off-by: Juergen Gross <jgross@xxxxxxxx>
---
RFC V2: add vcpu_sleep() handling and force_context_switch flag
V2: fix runstate change in sched_force_context_switch()
---
 xen/common/schedule.c      | 125 ++++++++++++++++++++++++++++++++++++++++++---
 xen/include/xen/sched-if.h |   9 ++--
 xen/include/xen/sched.h    |   2 +
 3 files changed, 127 insertions(+), 9 deletions(-)

diff --git a/xen/common/schedule.c b/xen/common/schedule.c
index 5cd7d2d857..fe7ab1b4de 100644
--- a/xen/common/schedule.c
+++ b/xen/common/schedule.c
@@ -714,8 +714,10 @@ void sched_destroy_domain(struct domain *d)
     }
 }
 
-void vcpu_sleep_nosync_locked(struct vcpu *v)
+static void vcpu_sleep_nosync_locked(struct vcpu *v)
 {
+    struct sched_unit *unit = v->sched_unit;
+
     ASSERT(spin_is_locked(get_sched_res(v->processor)->schedule_lock));
 
     if ( likely(!vcpu_runnable(v)) )
@@ -723,7 +725,14 @@ void vcpu_sleep_nosync_locked(struct vcpu *v)
         if ( v->runstate.state == RUNSTATE_runnable )
             vcpu_runstate_change(v, RUNSTATE_offline, NOW());
 
-        sched_sleep(vcpu_scheduler(v), v->sched_unit);
+        if ( likely(!unit_runnable(unit)) )
+            sched_sleep(vcpu_scheduler(v), unit);
+        else if ( unit_running(unit) > 1 && v->is_running &&
+                  !v->force_context_switch )
+        {
+            v->force_context_switch = true;
+            cpu_raise_softirq(v->processor, SCHED_SLAVE_SOFTIRQ);
+        }
     }
 }
 
@@ -755,16 +764,22 @@ void vcpu_wake(struct vcpu *v)
 {
     unsigned long flags;
     spinlock_t *lock;
+    struct sched_unit *unit = v->sched_unit;
 
     TRACE_2D(TRC_SCHED_WAKE, v->domain->domain_id, v->vcpu_id);
 
-    lock = unit_schedule_lock_irqsave(v->sched_unit, &flags);
+    lock = unit_schedule_lock_irqsave(unit, &flags);
 
     if ( likely(vcpu_runnable(v)) )
     {
         if ( v->runstate.state >= RUNSTATE_blocked )
             vcpu_runstate_change(v, RUNSTATE_runnable, NOW());
-        sched_wake(vcpu_scheduler(v), v->sched_unit);
+        sched_wake(vcpu_scheduler(v), unit);
+        if ( unit->is_running && !v->is_running && !v->force_context_switch )
+        {
+            v->force_context_switch = true;
+            cpu_raise_softirq(v->processor, SCHED_SLAVE_SOFTIRQ);
+        }
     }
     else if ( !(v->pause_flags & VPF_blocked) )
     {
@@ -772,7 +787,7 @@ void vcpu_wake(struct vcpu *v)
             vcpu_runstate_change(v, RUNSTATE_offline, NOW());
     }
 
-    unit_schedule_unlock_irqrestore(lock, flags, v->sched_unit);
+    unit_schedule_unlock_irqrestore(lock, flags, unit);
 }
 
 void vcpu_unblock(struct vcpu *v)
@@ -1981,6 +1996,62 @@ static void sched_context_switch(struct vcpu *vprev, struct vcpu *vnext,
     context_switch(vprev, vnext);
 }
 
+/*
+ * Force a context switch of a single vcpu of a unit.
+ * Might be called either if a vcpu of an already running unit is woken up
+ * or if a vcpu of a running unit is put to sleep with other vcpus of the
+ * same unit still running.
+ */
+static struct vcpu *sched_force_context_switch(struct vcpu *vprev,
+                                               struct vcpu *v,
+                                               int cpu, s_time_t now)
+{
+    v->force_context_switch = false;
+
+    if ( vcpu_runnable(v) == v->is_running )
+        return NULL;
+
+    if ( vcpu_runnable(v) )
+    {
+        if ( is_idle_vcpu(vprev) )
+        {
+            vcpu_runstate_change(vprev, RUNSTATE_runnable, now);
+            vprev->sched_unit = get_sched_res(cpu)->sched_unit_idle;
+        }
+        vcpu_runstate_change(v, RUNSTATE_running, now);
+    }
+    else
+    {
+        /* Make sure not to switch last vcpu of a unit away. */
+        if ( unit_running(v->sched_unit) == 1 )
+            return NULL;
+
+        v->new_state = vcpu_runstate_blocked(v);
+        vcpu_runstate_change(v, v->new_state, now);
+        v = sched_unit2vcpu_cpu(vprev->sched_unit, cpu);
+        if ( v != vprev )
+        {
+            if ( is_idle_vcpu(vprev) )
+            {
+                vcpu_runstate_change(vprev, RUNSTATE_runnable, now);
+                vprev->sched_unit = get_sched_res(cpu)->sched_unit_idle;
+            }
+            else
+            {
+                v->sched_unit = vprev->sched_unit;
+                vcpu_runstate_change(v, RUNSTATE_running, now);
+            }
+        }
+    }
+
+    v->is_running = 1;
+
+    /* Make sure not to lose another slave call. */
+    raise_softirq(SCHED_SLAVE_SOFTIRQ);
+
+    return v;
+}
+
 /*
  * Rendezvous before taking a scheduling decision.
  * Called with schedule lock held, so all accesses to the rendezvous counter
@@ -1996,6 +2067,7 @@ static struct sched_unit *sched_wait_rendezvous_in(struct sched_unit *prev,
                                                    s_time_t now)
 {
     struct sched_unit *next;
+    struct vcpu *v;
 
     if ( !--prev->rendezvous_in_cnt )
     {
@@ -2004,8 +2076,28 @@ static struct sched_unit *sched_wait_rendezvous_in(struct sched_unit *prev,
         return next;
     }
 
+    v = unit2vcpu_cpu(prev, cpu);
     while ( prev->rendezvous_in_cnt )
     {
+        if ( v && v->force_context_switch )
+        {
+            struct vcpu *vprev = current;
+
+            v = sched_force_context_switch(vprev, v, cpu, now);
+
+            if ( v )
+            {
+                /* We'll come back another time, so adjust rendezvous_in_cnt. */
+                prev->rendezvous_in_cnt++;
+                atomic_set(&prev->rendezvous_out_cnt, 0);
+
+                pcpu_schedule_unlock_irq(*lock, cpu);
+
+                sched_context_switch(vprev, v, false, now);
+            }
+
+            v = unit2vcpu_cpu(prev, cpu);
+        }
         /*
          * Coming from idle might need to do tasklet work.
          * In order to avoid deadlocks we can't do that here, but have to
@@ -2038,10 +2130,11 @@ static struct sched_unit *sched_wait_rendezvous_in(struct sched_unit *prev,
 
 static void sched_slave(void)
 {
-    struct vcpu *vprev = current;
+    struct vcpu *v, *vprev = current;
     struct sched_unit *prev = vprev->sched_unit, *next;
     s_time_t now;
    spinlock_t *lock;
+    bool do_softirq = false;
     int cpu = smp_processor_id();
 
     ASSERT_NOT_IN_ATOMIC();
@@ -2050,9 +2143,29 @@ static void sched_slave(void)
 
     now = NOW();
 
+    v = unit2vcpu_cpu(prev, cpu);
+    if ( v && v->force_context_switch )
+    {
+        v = sched_force_context_switch(vprev, v, cpu, now);
+
+        if ( v )
+        {
+            pcpu_schedule_unlock_irq(lock, cpu);
+
+            sched_context_switch(vprev, v, false, now);
+        }
+
+        do_softirq = true;
+    }
+
     if ( !prev->rendezvous_in_cnt )
     {
         pcpu_schedule_unlock_irq(lock, cpu);
+
+        /* Check for failed forced context switch. */
+        if ( do_softirq )
+            raise_softirq(SCHEDULE_SOFTIRQ);
+
         return;
     }

diff --git a/xen/include/xen/sched-if.h b/xen/include/xen/sched-if.h
index 3ac7757c0d..eb6b9ef55c 100644
--- a/xen/include/xen/sched-if.h
+++ b/xen/include/xen/sched-if.h
@@ -99,6 +99,11 @@ static inline bool unit_runnable(const struct sched_unit *unit)
     return false;
 }
 
+static inline int vcpu_runstate_blocked(struct vcpu *v)
+{
+    return (v->pause_flags & VPF_blocked) ? RUNSTATE_blocked : RUNSTATE_offline;
+}
+
 static inline bool unit_runnable_state(const struct sched_unit *unit)
 {
     struct vcpu *v;
@@ -111,9 +116,7 @@ static inline bool unit_runnable_state(const struct sched_unit *unit)
     {
         runnable = vcpu_runnable(v);
 
-        v->new_state = runnable ? RUNSTATE_running
-                                : (v->pause_flags & VPF_blocked)
-                                  ? RUNSTATE_blocked : RUNSTATE_offline;
+        v->new_state = runnable ? RUNSTATE_running : vcpu_runstate_blocked(v);
 
         if ( runnable )
             ret = true;
diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
index ed0535946f..e868646f0e 100644
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -186,6 +186,8 @@ struct vcpu
     bool             is_running;
     /* VCPU should wake fast (do not deep sleep the CPU). */
     bool             is_urgent;
+    /* VCPU must context_switch without scheduling unit. */
+    bool             force_context_switch;
 
 #ifdef VCPU_TRAP_LAST
 #define VCPU_TRAP_NONE    0
-- 
2.16.4
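For illustration only, below is a minimal stand-alone model of the
force_context_switch handshake the patch adds: the waking (or sleeping)
side merely marks the target vcpu and raises SCHED_SLAVE_SOFTIRQ, and the
target CPU's slave handler performs the single-vcpu switch itself, so the
caller never has to wait for the remote vcpu. This is not Xen code; the
struct layout and function names are simplified stand-ins and the softirq
is modelled by a direct call.

/*
 * Stand-alone sketch (not Xen code) of the force_context_switch handshake.
 * Assumed/simplified names: wake_single_vcpu() models the new branch in
 * vcpu_wake(), slave_softirq() models the check at the top of sched_slave().
 */
#include <stdbool.h>
#include <stdio.h>

struct vcpu {
    int id;
    bool is_running;
    bool force_context_switch;   /* mirrors the new struct vcpu field */
};

/* Waking side: only set the flag and "raise" the slave softirq. */
static void wake_single_vcpu(struct vcpu *v, bool unit_is_running)
{
    if ( unit_is_running && !v->is_running && !v->force_context_switch )
    {
        v->force_context_switch = true;
        printf("vcpu%d: flag set, SCHED_SLAVE_SOFTIRQ raised\n", v->id);
    }
}

/* Target CPU side: notice the flag and switch just this vcpu. */
static void slave_softirq(struct vcpu *v)
{
    if ( v->force_context_switch )
    {
        v->force_context_switch = false;
        v->is_running = true;    /* single vcpu switched in, unit untouched */
        printf("vcpu%d: forced context switch done\n", v->id);
    }
}

int main(void)
{
    struct vcpu v = { .id = 1 };

    wake_single_vcpu(&v, true);  /* unit already running on a sibling cpu */
    slave_softirq(&v);           /* handled on the target cpu */
    return 0;
}

The indirection through the softirq is what avoids the deadlock described
in the commit message: the vcpu initiating the state change never waits on
the target vcpu, it only queues work for the cpu the target runs on.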