[Xen-devel] [PATCH v2] x86emul: add "unblock NMI" retire flag
Even though we emulate IRET for (guest) real mode only right now, we
should get its effect on (virtual) NMI delivery right.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v2: Adjust x86_emulate_wrapper() accordingly.

--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -1955,25 +1955,32 @@ static int _hvm_emulate_one(struct hvm_e
         memcpy(vio->mmio_insn, hvmemul_ctxt->insn_buf, vio->mmio_insn_bytes);
     }
 
-    if ( rc != X86EMUL_OKAY )
-        return rc;
+    new_intr_shadow = hvmemul_ctxt->intr_shadow;
 
-    if ( hvmemul_ctxt->ctxt.retire.singlestep )
-        hvm_inject_hw_exception(TRAP_debug, X86_EVENT_NO_EC);
+    /*
+     * IRET, if valid in the given context, clears NMI blocking
+     * (irrespective of rc).
+     */
+    if ( hvmemul_ctxt->ctxt.retire.unblock_nmi )
+        new_intr_shadow &= ~HVM_INTR_SHADOW_NMI;
 
-    new_intr_shadow = hvmemul_ctxt->intr_shadow;
+    if ( rc == X86EMUL_OKAY )
+    {
+        if ( hvmemul_ctxt->ctxt.retire.singlestep )
+            hvm_inject_hw_exception(TRAP_debug, X86_EVENT_NO_EC);
 
-    /* MOV-SS instruction toggles MOV-SS shadow, else we just clear it. */
-    if ( hvmemul_ctxt->ctxt.retire.mov_ss )
-        new_intr_shadow ^= HVM_INTR_SHADOW_MOV_SS;
-    else
-        new_intr_shadow &= ~HVM_INTR_SHADOW_MOV_SS;
-
-    /* STI instruction toggles STI shadow, else we just clear it. */
-    if ( hvmemul_ctxt->ctxt.retire.sti )
-        new_intr_shadow ^= HVM_INTR_SHADOW_STI;
-    else
-        new_intr_shadow &= ~HVM_INTR_SHADOW_STI;
+        /* MOV-SS instruction toggles MOV-SS shadow, else we just clear it. */
+        if ( hvmemul_ctxt->ctxt.retire.mov_ss )
+            new_intr_shadow ^= HVM_INTR_SHADOW_MOV_SS;
+        else
+            new_intr_shadow &= ~HVM_INTR_SHADOW_MOV_SS;
+
+        /* STI instruction toggles STI shadow, else we just clear it. */
+        if ( hvmemul_ctxt->ctxt.retire.sti )
+            new_intr_shadow ^= HVM_INTR_SHADOW_STI;
+        else
+            new_intr_shadow &= ~HVM_INTR_SHADOW_STI;
+    }
 
     if ( hvmemul_ctxt->intr_shadow != new_intr_shadow )
     {
@@ -1981,13 +1988,11 @@ static int _hvm_emulate_one(struct hvm_e
         hvm_funcs.set_interrupt_shadow(curr, new_intr_shadow);
     }
 
-    if ( hvmemul_ctxt->ctxt.retire.hlt &&
+    if ( rc == X86EMUL_OKAY && hvmemul_ctxt->ctxt.retire.hlt &&
          !hvm_local_events_need_delivery(curr) )
-    {
         hvm_hlt(regs->eflags);
-    }
 
-    return X86EMUL_OKAY;
+    return rc;
 }
 
 int hvm_emulate_one(
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -4002,6 +4002,7 @@ x86_emulate(
         uint32_t mask = X86_EFLAGS_VIP | X86_EFLAGS_VIF | X86_EFLAGS_VM;
 
         fail_if(!in_realmode(ctxt, ops));
+        ctxt->retire.unblock_nmi = true;
         if ( (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes),
                               &eip, op_bytes, ctxt, ops)) ||
              (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes),
@@ -7918,9 +7919,17 @@ int x86_emulate_wrapper(
     rc = x86_emulate(ctxt, ops);
 
-    /* Retire flags should only be set for successful instruction emulation. */
+    /*
+     * Most retire flags should only be set for successful instruction
+     * emulation.
+     */
     if ( rc != X86EMUL_OKAY )
-        ASSERT(ctxt->retire.raw == 0);
+    {
+        typeof(ctxt->retire) retire = ctxt->retire;
+
+        retire.unblock_nmi = false;
+        ASSERT(!retire.raw);
+    }
 
     /* All cases returning X86EMUL_EXCEPTION should have fault semantics. */
     if ( rc == X86EMUL_EXCEPTION )
--- a/xen/arch/x86/x86_emulate/x86_emulate.h
+++ b/xen/arch/x86/x86_emulate/x86_emulate.h
@@ -490,6 +490,7 @@ struct x86_emulate_ctxt
             bool hlt:1;          /* Instruction HLTed. */
             bool mov_ss:1;       /* Instruction sets MOV-SS irq shadow. */
             bool sti:1;          /* Instruction sets STI irq shadow. */
+            bool unblock_nmi:1;  /* Instruction clears NMI blocking. */
             bool singlestep:1;   /* Singlestepping was active. */
         };
     } retire;
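
For illustration only (not part of the patch): the assertion change in
x86_emulate_wrapper() clears the new bit in a local copy of the retire
flags and then checks the remaining bits through the raw member. A
minimal, self-contained C sketch of that pattern follows; the union
merely mirrors the one in x86_emulate.h, and check_retire_on_failure()
is a hypothetical helper used here only to demonstrate the check.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Simplified stand-in for the retire flags union in x86_emulate.h. */
union retire_flags {
    uint8_t raw;
    struct {
        bool hlt:1;          /* Instruction HLTed. */
        bool mov_ss:1;       /* Instruction sets MOV-SS irq shadow. */
        bool sti:1;          /* Instruction sets STI irq shadow. */
        bool unblock_nmi:1;  /* Instruction clears NMI blocking. */
        bool singlestep:1;   /* Singlestepping was active. */
    };
};

/*
 * Hypothetical helper mirroring the adjusted assertion: on a failed
 * emulation every retire flag except unblock_nmi must be clear.
 */
static void check_retire_on_failure(union retire_flags retire)
{
    retire.unblock_nmi = false;   /* Modifies a local copy only. */
    assert(!retire.raw);
}

int main(void)
{
    union retire_flags r = { .raw = 0 };

    r.unblock_nmi = true;         /* e.g. set by a subsequently faulting IRET */
    check_retire_on_failure(r);   /* Passes: only unblock_nmi was set. */

    return 0;
}

The key point the sketch captures is that unblock_nmi, unlike the other
retire flags, may legitimately remain set when emulation fails, since a
faulting IRET has already dropped NMI blocking.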
Attachment: x86emul-unblock-NMI.patch