[Xen-devel] [PATCH 2/4] x86/HVM: unify and fix #UD intercept
The SVM and VMX versions really were identical, so instead of fixing the
same issue in two places, fold them at once. The issue fixed is the
missing seg:off -> linear translation of the current code address.

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -92,9 +92,12 @@ unsigned long __section(".bss.page_align
 static bool_t __initdata opt_hap_enabled = 1;
 boolean_param("hap", opt_hap_enabled);
 
-#ifndef opt_hvm_fep
-bool_t opt_hvm_fep;
+#ifndef NDEBUG
+/* Permit use of the Forced Emulation Prefix in HVM guests */
+static bool_t opt_hvm_fep;
 boolean_param("hvm_fep", opt_hvm_fep);
+#else
+#define opt_hvm_fep 0
 #endif
 
 /* Xen command-line option to enable altp2m */
@@ -4931,6 +4934,49 @@ gp_fault:
     return X86EMUL_EXCEPTION;
 }
 
+void hvm_ud_intercept(struct cpu_user_regs *regs)
+{
+    struct hvm_emulate_ctxt ctxt;
+
+    if ( opt_hvm_fep )
+    {
+        struct vcpu *cur = current;
+        struct segment_register cs;
+        unsigned long addr;
+        char sig[5]; /* ud2; .ascii "xen" */
+
+        hvm_get_segment_register(cur, x86_seg_cs, &cs);
+        if ( hvm_virtual_to_linear_addr(x86_seg_cs, &cs, regs->eip,
+                                        sizeof(sig), hvm_access_insn_fetch,
+                                        (hvm_long_mode_enabled(cur) &&
+                                         cs.attr.fields.l) ? 64 :
+                                        cs.attr.fields.db ? 32 : 16, &addr) &&
+             (hvm_fetch_from_guest_virt_nofault(sig, addr, sizeof(sig),
+                                                0) == HVMCOPY_okay) &&
+             (memcmp(sig, "\xf\xbxen", sizeof(sig)) == 0) )
+        {
+            regs->eip += sizeof(sig);
+            regs->eflags &= ~X86_EFLAGS_RF;
+        }
+    }
+
+    hvm_emulate_prepare(&ctxt, regs);
+
+    switch ( hvm_emulate_one(&ctxt) )
+    {
+    case X86EMUL_UNHANDLEABLE:
+        hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
+        break;
+    case X86EMUL_EXCEPTION:
+        if ( ctxt.exn_pending )
+            hvm_inject_trap(&ctxt.trap);
+        /* fall through */
+    default:
+        hvm_emulate_writeback(&ctxt);
+        break;
+    }
+}
+
 enum hvm_intblk hvm_interrupt_blocked(struct vcpu *v, struct hvm_intack intack)
 {
     unsigned long intr_shadow;
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -2123,43 +2123,6 @@ svm_vmexit_do_vmsave(struct vmcb_struct
     return;
 }
 
-static void svm_vmexit_ud_intercept(struct cpu_user_regs *regs)
-{
-    struct hvm_emulate_ctxt ctxt;
-    int rc;
-
-    if ( opt_hvm_fep )
-    {
-        char sig[5]; /* ud2; .ascii "xen" */
-
-        if ( (hvm_fetch_from_guest_virt_nofault(
-                  sig, regs->eip, sizeof(sig), 0) == HVMCOPY_okay) &&
-             (memcmp(sig, "\xf\xbxen", sizeof(sig)) == 0) )
-        {
-            regs->eip += sizeof(sig);
-            regs->eflags &= ~X86_EFLAGS_RF;
-        }
-    }
-
-    hvm_emulate_prepare(&ctxt, regs);
-
-    rc = hvm_emulate_one(&ctxt);
-
-    switch ( rc )
-    {
-    case X86EMUL_UNHANDLEABLE:
-        hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
-        break;
-    case X86EMUL_EXCEPTION:
-        if ( ctxt.exn_pending )
-            hvm_inject_trap(&ctxt.trap);
-        /* fall through */
-    default:
-        hvm_emulate_writeback(&ctxt);
-        break;
-    }
-}
-
 static int svm_is_erratum_383(struct cpu_user_regs *regs)
 {
     uint64_t msr_content;
@@ -2491,7 +2454,7 @@ void svm_vmexit_handler(struct cpu_user_
         break;
 
     case VMEXIT_EXCEPTION_UD:
-        svm_vmexit_ud_intercept(regs);
+        hvm_ud_intercept(regs);
         break;
 
     /* Asynchronous event, handled when we STGI'd after the VMEXIT. */
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2747,43 +2747,6 @@ void vmx_enter_realmode(struct cpu_user_
     regs->eflags |= (X86_EFLAGS_VM | X86_EFLAGS_IOPL);
 }
 
-static void vmx_vmexit_ud_intercept(struct cpu_user_regs *regs)
-{
-    struct hvm_emulate_ctxt ctxt;
-    int rc;
-
-    if ( opt_hvm_fep )
-    {
-        char sig[5]; /* ud2; .ascii "xen" */
-
-        if ( (hvm_fetch_from_guest_virt_nofault(
-                  sig, regs->eip, sizeof(sig), 0) == HVMCOPY_okay) &&
-             (memcmp(sig, "\xf\xbxen", sizeof(sig)) == 0) )
-        {
-            regs->eip += sizeof(sig);
-            regs->eflags &= ~X86_EFLAGS_RF;
-        }
-    }
-
-    hvm_emulate_prepare(&ctxt, regs);
-
-    rc = hvm_emulate_one(&ctxt);
-
-    switch ( rc )
-    {
-    case X86EMUL_UNHANDLEABLE:
-        hvm_inject_hw_exception(TRAP_invalid_op, HVM_DELIVER_NO_ERROR_CODE);
-        break;
-    case X86EMUL_EXCEPTION:
-        if ( ctxt.exn_pending )
-            hvm_inject_trap(&ctxt.trap);
-        /* fall through */
-    default:
-        hvm_emulate_writeback(&ctxt);
-        break;
-    }
-}
-
 static int vmx_handle_eoi_write(void)
 {
     unsigned long exit_qualification;
@@ -3138,7 +3101,7 @@ void vmx_vmexit_handler(struct cpu_user_
         break;
     case TRAP_invalid_op:
         HVMTRACE_1D(TRAP, vector);
-        vmx_vmexit_ud_intercept(regs);
+        hvm_ud_intercept(regs);
        break;
     default:
         HVMTRACE_1D(TRAP, vector);
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -551,13 +551,6 @@ static inline bool_t hvm_altp2m_supporte
     return hvm_funcs.altp2m_supported;
 }
 
-#ifndef NDEBUG
-/* Permit use of the Forced Emulation Prefix in HVM guests */
-extern bool_t opt_hvm_fep;
-#else
-#define opt_hvm_fep 0
-#endif
-
 /* updates the current hardware p2m */
 void altp2m_vcpu_update_p2m(struct vcpu *v);
 
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -131,6 +131,7 @@ int hvm_msr_write_intercept(
     unsigned int msr, uint64_t msr_content, bool_t may_defer);
 int hvm_mov_to_cr(unsigned int cr, unsigned int gpr);
 int hvm_mov_from_cr(unsigned int cr, unsigned int gpr);
+void hvm_ud_intercept(struct cpu_user_regs *);
 
 #endif /* __ASM_X86_HVM_SUPPORT_H__ */
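For reference only (not part of the patch): a minimal sketch of how a guest
could exercise the forced emulation prefix that hvm_ud_intercept() looks for.
It assumes a debug build of Xen booted with "hvm_fep" and GCC-style inline
assembly in the guest; the helper name below is invented for illustration.
The prefix is simply ud2 followed by the ASCII bytes "xen", i.e. the 5-byte
signature 0x0f 0x0b 'x' 'e' 'n' matched above, and the instruction following
it is then run through Xen's instruction emulator.

/*
 * Illustrative guest-side helper (hypothetical name); requires a debug
 * Xen with "hvm_fep" on its command line.
 */
static inline void fep_cpuid(unsigned int leaf,
                             unsigned int *eax, unsigned int *ebx,
                             unsigned int *ecx, unsigned int *edx)
{
    /*
     * ud2; .ascii "xen" emits 0x0f 0x0b 'x' 'e' 'n', the signature checked
     * in hvm_ud_intercept(); the cpuid that follows is what gets emulated.
     */
    asm volatile ( "ud2; .ascii \"xen\"; cpuid"
                   : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
                   : "a" (leaf), "c" (0) );
}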
Attachment: x86-HVM-guest-fetch-virt.patch

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel