IRQ/evtchn: adjust affinity handling Virtually forever, Xen code diverged from native in the way affinities of IRQs got managed: Native, even if restricting handling of an IRQ to a single CPU e.g. because of APIC ID constraints, it would still keep the affinity set to all permitted CPUs. Xen instead restricted the affinity along with the handling. Retain that behavior only for per-CPU IRQs (and for other dynamic ones on their initial setup, albeit perhaps even that is still too strict), but make physical ones (and dynamic ones if their affinity gets adjusted on the fly) match native behavior. Signed-off-by: Jan Beulich --- a/drivers/xen/core/evtchn.c +++ b/drivers/xen/core/evtchn.c @@ -137,21 +137,28 @@ static inline unsigned long active_evtch ~sh->evtchn_mask[idx]); } -static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu) +static void _bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu, int irq, + cpumask_t cpumask) { shared_info_t *s = HYPERVISOR_shared_info; - int irq = evtchn_to_irq[chn]; BUG_ON(!test_bit(chn, s->evtchn_mask)); - if (irq != -1) - set_native_irq_info(irq, cpumask_of_cpu(cpu)); + if (irq >= 0) { + BUG_ON(!cpu_isset(cpu, cpumask)); + set_native_irq_info(irq, cpumask); + } clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]); set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]); cpu_evtchn[chn] = cpu; } +static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu) +{ + _bind_evtchn_to_cpu(chn, cpu, evtchn_to_irq[chn], cpumask_of_cpu(cpu)); +} + static void init_evtchn_cpu_bindings(void) { int i; @@ -180,6 +187,11 @@ static inline unsigned long active_evtch return (sh->evtchn_pending[idx] & ~sh->evtchn_mask[idx]); } +static void _bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu, int irq, + cpumask_t cpumask) +{ +} + static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu) { } @@ -663,30 +675,32 @@ void unbind_from_irqhandler(unsigned int EXPORT_SYMBOL_GPL(unbind_from_irqhandler); #ifdef CONFIG_SMP 
-void rebind_evtchn_to_cpu(int port, unsigned int cpu) +static void _rebind_evtchn_to_cpu(int port, unsigned int cpu, int irq, + cpumask_t dest) { struct evtchn_bind_vcpu ebv = { .port = port, .vcpu = cpu }; int masked; masked = test_and_set_evtchn_mask(port); if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &ebv) == 0) - bind_evtchn_to_cpu(port, cpu); + _bind_evtchn_to_cpu(port, cpu, irq, dest); if (!masked) unmask_evtchn(port); } -static void rebind_irq_to_cpu(unsigned int irq, unsigned int tcpu) +void rebind_evtchn_to_cpu(int port, unsigned int cpu) { - int evtchn = evtchn_from_irq(irq); - - if (VALID_EVTCHN(evtchn)) - rebind_evtchn_to_cpu(evtchn, tcpu); + _rebind_evtchn_to_cpu(port, cpu, evtchn_to_irq[port], + cpumask_of_cpu(cpu)); } static void set_affinity_irq(unsigned int irq, cpumask_t dest) { + int evtchn = evtchn_from_irq(irq); unsigned tcpu = first_cpu(dest); - rebind_irq_to_cpu(irq, tcpu); + + if (VALID_EVTCHN(evtchn)) + _rebind_evtchn_to_cpu(evtchn, tcpu, irq, dest); } #endif @@ -854,7 +868,7 @@ static unsigned int startup_pirq(unsigne pirq_query_unmask(irq); evtchn_to_irq[evtchn] = irq; - bind_evtchn_to_cpu(evtchn, 0); + _bind_evtchn_to_cpu(evtchn, 0, irq, cpu_possible_map); irq_info[irq] = mk_irq_info(IRQT_PIRQ, bind_pirq.pirq, evtchn); out: @@ -1019,7 +1033,7 @@ static void restore_cpu_virqs(unsigned i /* Record the new mapping. */ evtchn_to_irq[evtchn] = irq; irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn); - bind_evtchn_to_cpu(evtchn, cpu); + _bind_evtchn_to_cpu(evtchn, cpu, -1, CPU_MASK_NONE); /* Ready for use. */ unmask_evtchn(evtchn); @@ -1047,7 +1061,7 @@ static void restore_cpu_ipis(unsigned in /* Record the new mapping. */ evtchn_to_irq[evtchn] = irq; irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn); - bind_evtchn_to_cpu(evtchn, cpu); + _bind_evtchn_to_cpu(evtchn, cpu, -1, CPU_MASK_NONE); /* Ready for use. */ unmask_evtchn(evtchn);