x86: don't rely on __softirq_pending to be the first field in irq_cpustat_t

This is even more so as the field doesn't have a comment to that effect
in the structure definition.

While modifying the respective assembly code anyway, also convert the
IRQSTAT_shift users to do a 32-bit shift only (as we won't support 48M
CPUs any time soon) and use "cmpl" instead of "testl" when checking the
field (both reducing code size).

Signed-off-by: Jan Beulich

--- a/xen/arch/x86/hvm/svm/entry.S
+++ b/xen/arch/x86/hvm/svm/entry.S
@@ -41,10 +41,10 @@ ENTRY(svm_asm_do_resume)
         CLGI
 
         mov  VCPU_processor(%rbx),%eax
-        shl  $IRQSTAT_shift,%rax
-        lea  irq_stat(%rip),%rdx
-        testl $~0,(%rdx,%rax,1)
-        jnz  .Lsvm_process_softirqs
+        shl  $IRQSTAT_shift,%eax
+        lea  irq_stat+IRQSTAT_softirq_pending(%rip),%rdx
+        cmpl $0,(%rdx,%rax,1)
+        jne  .Lsvm_process_softirqs
 
         testb $0, VCPU_nsvm_hap_enabled(%rbx)
 UNLIKELY_START(nz, nsvm_hap)
--- a/xen/arch/x86/hvm/vmx/entry.S
+++ b/xen/arch/x86/hvm/vmx/entry.S
@@ -97,8 +97,8 @@ vmx_asm_do_vmentry:
         cli
 
         mov  VCPU_processor(%rbx),%eax
-        shl  $IRQSTAT_shift,%rax
-        lea  irq_stat(%rip),%rdx
+        shl  $IRQSTAT_shift,%eax
+        lea  irq_stat+IRQSTAT_softirq_pending(%rip),%rdx
         cmpl $0,(%rdx,%rax,1)
         jnz  .Lvmx_process_softirqs
 
--- a/xen/arch/x86/x86_64/asm-offsets.c
+++ b/xen/arch/x86/x86_64/asm-offsets.c
@@ -162,6 +162,7 @@ void __dummy__(void)
 #endif
 
     DEFINE(IRQSTAT_shift, LOG_2(sizeof(irq_cpustat_t)));
+    OFFSET(IRQSTAT_softirq_pending, irq_cpustat_t, __softirq_pending);
     BLANK();
 
     OFFSET(CPUINFO86_ext_features, struct cpuinfo_x86, x86_capability[1]);
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -96,10 +96,10 @@ ENTRY(compat_test_all_events)
         cli                             # tests must not race interrupts
 /*compat_test_softirqs:*/
         movl  VCPU_processor(%rbx),%eax
-        shlq  $IRQSTAT_shift,%rax
-        leaq  irq_stat(%rip),%rcx
-        testl $~0,(%rcx,%rax,1)
-        jnz   compat_process_softirqs
+        shll  $IRQSTAT_shift,%eax
+        leaq  irq_stat+IRQSTAT_softirq_pending(%rip),%rcx
+        cmpl  $0,(%rcx,%rax,1)
+        jne   compat_process_softirqs
         testb $1,VCPU_mce_pending(%rbx)
         jnz   compat_process_mce
 .Lcompat_test_guest_nmi:
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -195,8 +195,8 @@ test_all_events:
         cli                             # tests must not race interrupts
 /*test_softirqs:*/
         movl  VCPU_processor(%rbx),%eax
-        shl   $IRQSTAT_shift,%rax
-        leaq  irq_stat(%rip),%rcx
+        shll  $IRQSTAT_shift,%eax
+        leaq  irq_stat+IRQSTAT_softirq_pending(%rip),%rcx
         cmpl  $0,(%rcx,%rax,1)
         jne   process_softirqs
         testb $1,VCPU_mce_pending(%rbx)
@@ -663,7 +663,7 @@ handle_ist_exception:
         /* Send an IPI to ourselves to cover for the lack of event checking. */
         movl  VCPU_processor(%rbx),%eax
         shll  $IRQSTAT_shift,%eax
-        leaq  irq_stat(%rip),%rcx
+        leaq  irq_stat+IRQSTAT_softirq_pending(%rip),%rcx
         cmpl  $0,(%rcx,%rax,1)
         je    1f
         movl  $EVENT_CHECK_VECTOR,%edi
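
For reference (not part of the patch): a minimal standalone C sketch of
what the two generated constants give the assembly. The structure layout
below is hypothetical; only the arithmetic mirrors what asm-offsets.c
emits. The per-CPU access then computes irq_stat +
IRQSTAT_softirq_pending + (cpu << IRQSTAT_shift), which stays correct no
matter where __softirq_pending sits inside irq_cpustat_t:

/* Standalone sketch, NOT Xen code: the real irq_cpustat_t layout differs,
 * and the first field here is made up. Build with e.g. gcc -O2 sketch.c */
#include <stddef.h>
#include <stdio.h>

typedef struct {
    unsigned long other_state;       /* hypothetical field placed first */
    unsigned int __softirq_pending;  /* no longer assumed to be at offset 0 */
} __attribute__((aligned(64))) irq_cpustat_t;

int main(void)
{
    /* What OFFSET(IRQSTAT_softirq_pending, irq_cpustat_t, __softirq_pending)
     * makes asm-offsets.c emit for the assembler: */
    printf("IRQSTAT_softirq_pending = %zu\n",
           offsetof(irq_cpustat_t, __softirq_pending));

    /* DEFINE(IRQSTAT_shift, LOG_2(sizeof(irq_cpustat_t))): with the 64-byte
     * stride above this would be 6. "shll $IRQSTAT_shift,%eax" scales the
     * 32-bit CPU number into a byte offset, and (%rdx,%rax,1) then reads
     * irq_stat + IRQSTAT_softirq_pending + cpu * sizeof(irq_cpustat_t). */
    printf("sizeof(irq_cpustat_t)   = %zu\n", sizeof(irq_cpustat_t));
    return 0;
}

On the code-size claims: TEST has no sign-extended 8-bit-immediate
encoding, so "testl $~0,mem" carries a 4-byte immediate where
"cmpl $0,mem" gets by with 1 byte, and shifting %eax instead of %rax
drops the REX.W prefix from each shift.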