Re: [PATCH-for-4.17] xen/sched: fix restore_vcpu_affinity() by removing it
On 21.10.2022 08:58, Juergen Gross wrote:
> --- a/xen/common/sched/core.c
> +++ b/xen/common/sched/core.c
> @@ -1196,76 +1196,6 @@ static void sched_reset_affinity_broken(const struct sched_unit *unit)
>          v->affinity_broken = false;
>  }

My pre-push build test failed because the function above ...

> -void restore_vcpu_affinity(struct domain *d)
> -{
> -    unsigned int cpu = smp_processor_id();
> -    struct sched_unit *unit;
> -
> -    ASSERT(system_state == SYS_STATE_resume);
> -
> -    rcu_read_lock(&sched_res_rculock);
> -
> -    for_each_sched_unit ( d, unit )
> -    {
> -        spinlock_t *lock;
> -        unsigned int old_cpu = sched_unit_master(unit);
> -        struct sched_resource *res;
> -
> -        ASSERT(!unit_runnable(unit));
> -
> -        /*
> -         * Re-assign the initial processor as after resume we have no
> -         * guarantee the old processor has come back to life again.
> -         *
> -         * Therefore, here, before actually unpausing the domains, we should
> -         * set v->processor of each of their vCPUs to something that will
> -         * make sense for the scheduler of the cpupool in which they are in.
> -         */
> -        lock = unit_schedule_lock_irq(unit);
> -
> -        cpumask_and(cpumask_scratch_cpu(cpu), unit->cpu_hard_affinity,
> -                    cpupool_domain_master_cpumask(d));
> -        if ( cpumask_empty(cpumask_scratch_cpu(cpu)) )
> -        {
> -            if ( sched_check_affinity_broken(unit) )
> -            {
> -                sched_set_affinity(unit, unit->cpu_hard_affinity_saved, NULL);
> -                sched_reset_affinity_broken(unit);

... has its only use removed here. It didn't seem appropriate for me to go
and silently remove that function as well.

Jan

> -                cpumask_and(cpumask_scratch_cpu(cpu), unit->cpu_hard_affinity,
> -                            cpupool_domain_master_cpumask(d));
> -            }
> -
> -            if ( cpumask_empty(cpumask_scratch_cpu(cpu)) )
> -            {
> -                /* Affinity settings of one vcpu are for the complete unit. */
> -                printk(XENLOG_DEBUG "Breaking affinity for %pv\n",
> -                       unit->vcpu_list);
> -                sched_set_affinity(unit, &cpumask_all, NULL);
> -                cpumask_and(cpumask_scratch_cpu(cpu), unit->cpu_hard_affinity,
> -                            cpupool_domain_master_cpumask(d));
> -            }
> -        }
> -
> -        res = get_sched_res(cpumask_any(cpumask_scratch_cpu(cpu)));
> -        sched_set_res(unit, res);
> -
> -        spin_unlock_irq(lock);
> -
> -        /* v->processor might have changed, so reacquire the lock. */
> -        lock = unit_schedule_lock_irq(unit);
> -        res = sched_pick_resource(unit_scheduler(unit), unit);
> -        sched_set_res(unit, res);
> -        spin_unlock_irq(lock);
> -
> -        if ( old_cpu != sched_unit_master(unit) )
> -            sched_move_irqs(unit);
> -    }
> -
> -    rcu_read_unlock(&sched_res_rculock);
> -
> -    domain_update_node_affinity(d);
> -}
> -
>  /*
>   * This function is used by cpu_hotplug code via cpu notifier chain
>   * and from cpupools to switch schedulers on a cpu.
> diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
> index 557b3229f6..072e4846aa 100644
> --- a/xen/include/xen/sched.h
> +++ b/xen/include/xen/sched.h
> @@ -1019,7 +1019,6 @@ void vcpu_set_periodic_timer(struct vcpu *v, s_time_t value);
>  void sched_setup_dom0_vcpus(struct domain *d);
>  int vcpu_temporary_affinity(struct vcpu *v, unsigned int cpu, uint8_t reason);
>  int vcpu_set_hard_affinity(struct vcpu *v, const cpumask_t *affinity);
> -void restore_vcpu_affinity(struct domain *d);
>  int vcpu_affinity_domctl(struct domain *d, uint32_t cmd,
>                           struct xen_domctl_vcpuaffinity *vcpuaff);
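
For context, here is a minimal standalone sketch (not Xen code) of the kind of
failure the pre-push build test presumably hit: sched_reset_affinity_broken()
is a static function, so once restore_vcpu_affinity(), its only caller, is
removed, a defined-but-unused warning fires, and a -Werror build turns that
into a hard error. The helper()/caller() names and the exact compiler flags
below are illustrative assumptions.

    /* unused.c -- hypothetical example, not Xen source.
     * Compile with:  gcc -c -Wall -Werror unused.c
     */

    /* Stand-in for sched_reset_affinity_broken(): static, file-local. */
    static void helper(void)
    {
    }

    #if 0   /* Stand-in for restore_vcpu_affinity(), the sole caller, now gone. */
    void caller(void)
    {
        helper();
    }
    #endif

    /*
     * With the caller compiled out, GCC reports something like:
     *   error: 'helper' defined but not used [-Werror=unused-function]
     * which is why removing restore_vcpu_affinity() also requires dealing
     * with the now-unreferenced static helper.
     */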