One aspect worth noting is that cpumask_raise_softirq() gets brought in
sync here with cpu_raise_softirq() in that now both don't attempt to
raise a self-IPI on the processing CPU.

--- a/xen/arch/x86/acpi/cpu_idle.c
+++ b/xen/arch/x86/acpi/cpu_idle.c
@@ -340,6 +340,16 @@ void cpuidle_wakeup_mwait(cpumask_t *mas
     cpumask_andnot(mask, mask, &target);
 }
 
+bool_t arch_skip_send_event_check(unsigned int cpu)
+{
+    /*
+     * This relies on softirq_pending() and mwait_wakeup() to access data
+     * on the same cache line.
+     */
+    smp_mb();
+    return !!cpumask_test_cpu(cpu, &cpuidle_mwait_flags);
+}
+
 void mwait_idle_with_hints(unsigned int eax, unsigned int ecx)
 {
     unsigned int cpu = smp_processor_id();
@@ -359,7 +369,7 @@ void mwait_idle_with_hints(unsigned int
      * Timer deadline passing is the event on which we will be woken via
      * cpuidle_mwait_wakeup. So check it now that the location is armed.
      */
-    if ( expires > NOW() || expires == 0 )
+    if ( (expires > NOW() || expires == 0) && !softirq_pending(cpu) )
     {
         cpumask_set_cpu(cpu, &cpuidle_mwait_flags);
         __mwait(eax, ecx);
--- a/xen/common/softirq.c
+++ b/xen/common/softirq.c
@@ -70,12 +70,14 @@ void open_softirq(int nr, softirq_handle
 
 void cpumask_raise_softirq(const cpumask_t *mask, unsigned int nr)
 {
-    int cpu;
+    unsigned int cpu, this_cpu = smp_processor_id();
     cpumask_t send_mask;
 
     cpumask_clear(&send_mask);
     for_each_cpu(cpu, mask)
-        if ( !test_and_set_bit(nr, &softirq_pending(cpu)) )
+        if ( !test_and_set_bit(nr, &softirq_pending(cpu)) &&
+             cpu != this_cpu &&
+             !arch_skip_send_event_check(cpu) )
             cpumask_set_cpu(cpu, &send_mask);
 
     smp_send_event_check_mask(&send_mask);
@@ -84,7 +86,8 @@ void cpumask_raise_softirq(const cpumask
 void cpu_raise_softirq(unsigned int cpu, unsigned int nr)
 {
     if ( !test_and_set_bit(nr, &softirq_pending(cpu))
-         && (cpu != smp_processor_id()) )
+         && (cpu != smp_processor_id())
+         && !arch_skip_send_event_check(cpu) )
         smp_send_event_check_cpu(cpu);
 }
--- a/xen/include/asm-arm/softirq.h
+++ b/xen/include/asm-arm/softirq.h
@@ -3,6 +3,8 @@
 
 #define NR_ARCH_SOFTIRQS       0
 
+#define arch_skip_send_event_check(cpu) 0
+
 #endif /* __ASM_SOFTIRQ_H__ */
 /*
  * Local variables:
--- a/xen/include/asm-x86/softirq.h
+++ b/xen/include/asm-x86/softirq.h
@@ -9,4 +9,6 @@
 #define PCI_SERR_SOFTIRQ       (NR_COMMON_SOFTIRQS + 4)
 #define NR_ARCH_SOFTIRQS       5
 
+bool_t arch_skip_send_event_check(unsigned int cpu);
+
 #endif /* __ASM_SOFTIRQ_H__ */