x86: defer processing events on the NMI exit path

Otherwise, we may end up in the scheduler, keeping NMIs masked for a
possibly unbounded period of time (until whenever the next IRET gets
executed). Enforce timely event processing by sending a self IPI.

Of course it's open for discussion whether to always use the straight
exit path from handle_ist_exception.

Signed-off-by: Jan Beulich
---
v2: Send IPI to ourselves to enforce event processing. Dropped all ACKs.

--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -171,7 +171,7 @@ compat_bad_hypercall:
         jmp   compat_test_all_events
 
 /* %rbx: struct vcpu, interrupts disabled */
-compat_restore_all_guest:
+ENTRY(compat_restore_all_guest)
         ASSERT_INTERRUPTS_DISABLED
         RESTORE_ALL adj=8 compat=1
 .Lft0:  iretq
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -11,6 +11,7 @@
 #include
 #include
 #include
+#include
 
         ALIGN
 /* %rbx: struct vcpu */
@@ -635,6 +636,9 @@ ENTRY(early_page_fault)
         jmp   restore_all_xen
         .popsection
 
+ENTRY(nmi)
+        pushq $0
+        movl  $TRAP_nmi,4(%rsp)
 handle_ist_exception:
         SAVE_ALL
         testb $3,UREGS_cs(%rsp)
@@ -649,12 +653,25 @@ handle_ist_exception:
         movzbl UREGS_entry_vector(%rsp),%eax
         leaq  exception_table(%rip),%rdx
         callq *(%rdx,%rax,8)
-        jmp   ret_from_intr
+        cmpb  $TRAP_nmi,UREGS_entry_vector(%rsp)
+        jne   ret_from_intr
 
-ENTRY(nmi)
-        pushq $0
-        movl  $TRAP_nmi,4(%rsp)
-        jmp   handle_ist_exception
+        /* We want to get straight to the IRET on the NMI exit path. */
+        testb $3,UREGS_cs(%rsp)
+        jz    restore_all_xen
+        GET_CURRENT(%rbx)
+        /* Send an IPI to ourselves to cover for the lack of event checking. */
+        movl  VCPU_processor(%rbx),%eax
+        shll  $IRQSTAT_shift,%eax
+        leaq  irq_stat(%rip),%rcx
+        cmpl  $0,(%rcx,%rax,1)
+        je    1f
+        movl  $EVENT_CHECK_VECTOR,%edi
+        call  send_IPI_self
+1:      movq  VCPU_domain(%rbx),%rax
+        cmpb  $0,DOMAIN_is_32bit_pv(%rax)
+        je    restore_all_guest
+        jmp   compat_restore_all_guest
 
 ENTRY(nmi_crash)
         pushq $0
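
For reference, the added exit-path logic corresponds roughly to the C sketch
below (illustration only, not part of the patch; nmi_exit_event_check() is a
made-up name, while softirq_pending(), send_IPI_self() and EVENT_CHECK_VECTOR
are the existing Xen symbols the assembly refers to):

/*
 * Illustrative sketch of the event check the new assembly performs on the
 * NMI exit path.  Assumes Xen-internal declarations (struct vcpu,
 * softirq_pending(), send_IPI_self(), EVENT_CHECK_VECTOR) are in scope.
 */
static void nmi_exit_event_check(struct vcpu *v)
{
    unsigned int cpu = v->processor;

    /*
     * The straight-to-IRET exit bypasses the usual test_all_events work,
     * so anything left pending would otherwise sit until the next
     * interrupt.  A self IPI makes sure it gets looked at promptly.
     */
    if ( softirq_pending(cpu) )
        send_IPI_self(EVENT_CHECK_VECTOR);

    /* The actual exit then goes through (compat_)restore_all_guest. */
}

The self IPI is what guarantees the skipped event check happens soon after
the IRET, rather than being deferred until whatever interrupt comes next.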