[Xen-devel] [PATCH v2 for-4.9 5/6] x86/emul: Drop swint_emulate infrastructure
With the SVM injection logic capable of doing its own emulation, there is no
need for this hardware-specific assistance in the common emulator.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
Reviewed-by: Paul Durrant <paul.durrant@xxxxxxxxxx>
Reviewed-by: Jan Beulich <jbeulich@xxxxxxxx>
---
CC: Tim Deegan <tim@xxxxxxx>
CC: Julien Grall <julien.grall@xxxxxxx>

v2:
 * imm8 -> imm1
---
 tools/fuzz/x86_instruction_emulator/fuzz-emul.c |  18 +--
 xen/arch/x86/hvm/emulate.c                      |   7 -
 xen/arch/x86/mm.c                               |   2 -
 xen/arch/x86/mm/shadow/common.c                 |   1 -
 xen/arch/x86/x86_emulate/x86_emulate.c          | 187 ++++--------------------
 xen/arch/x86/x86_emulate/x86_emulate.h          |  53 -------
 6 files changed, 30 insertions(+), 238 deletions(-)

diff --git a/tools/fuzz/x86_instruction_emulator/fuzz-emul.c b/tools/fuzz/x86_instruction_emulator/fuzz-emul.c
index 890642c..8488816 100644
--- a/tools/fuzz/x86_instruction_emulator/fuzz-emul.c
+++ b/tools/fuzz/x86_instruction_emulator/fuzz-emul.c
@@ -536,8 +536,7 @@ enum {
     HOOK_put_fpu,
     HOOK_invlpg,
     HOOK_vmfunc,
-    OPTION_swint_emulation, /* Two bits */
-    CANONICALIZE_rip = OPTION_swint_emulation + 2,
+    CANONICALIZE_rip,
     CANONICALIZE_rsp,
     CANONICALIZE_rbp
 };
@@ -577,19 +576,6 @@ static void disable_hooks(void)
     MAYBE_DISABLE_HOOK(invlpg);
 }
 
-static void set_swint_support(struct x86_emulate_ctxt *ctxt)
-{
-    unsigned int swint_opt = (input.options >> OPTION_swint_emulation) & 3;
-    static const enum x86_swint_emulation map[4] = {
-        x86_swint_emulate_none,
-        x86_swint_emulate_none,
-        x86_swint_emulate_icebp,
-        x86_swint_emulate_all
-    };
-
-    ctxt->swint_emulate = map[swint_opt];
-}
-
 /*
  * Constrain input to architecturally-possible states where
  * the emulator relies on these
@@ -693,8 +679,6 @@ int LLVMFuzzerTestOneInput(const uint8_t *data_p, size_t size)
 
     disable_hooks();
 
-    set_swint_support(&ctxt);
-
     do {
         /* FIXME: Until we actually implement SIGFPE handling properly */
         setup_fpu_exception_handler();
diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index 87ca801..39e4319 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -2033,13 +2033,6 @@ void hvm_emulate_init_once(
     hvmemul_ctxt->ctxt.regs = regs;
     hvmemul_ctxt->ctxt.vendor = curr->domain->arch.cpuid->x86_vendor;
     hvmemul_ctxt->ctxt.force_writeback = true;
-
-    if ( cpu_has_vmx )
-        hvmemul_ctxt->ctxt.swint_emulate = x86_swint_emulate_none;
-    else if ( cpu_has_svm_nrips )
-        hvmemul_ctxt->ctxt.swint_emulate = x86_swint_emulate_icebp;
-    else
-        hvmemul_ctxt->ctxt.swint_emulate = x86_swint_emulate_all;
 }
 
 void hvm_emulate_init_per_insn(
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index be4e308..3918a37 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -5412,7 +5412,6 @@ int ptwr_do_page_fault(struct vcpu *v, unsigned long addr,
             .vendor = d->arch.cpuid->x86_vendor,
             .addr_size = is_pv_32bit_domain(d) ? 32 : BITS_PER_LONG,
             .sp_size   = is_pv_32bit_domain(d) ? 32 : BITS_PER_LONG,
-            .swint_emulate = x86_swint_emulate_none,
         },
     };
     int rc;
@@ -5567,7 +5566,6 @@ int mmio_ro_do_page_fault(struct vcpu *v, unsigned long addr,
         .vendor = v->domain->arch.cpuid->x86_vendor,
         .addr_size = addr_size,
         .sp_size = addr_size,
-        .swint_emulate = x86_swint_emulate_none,
         .data = &mmio_ro_ctxt
     };
     int rc;
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 574337c..736ceaa 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -326,7 +326,6 @@ const struct x86_emulate_ops *shadow_init_emulation(
 
     sh_ctxt->ctxt.regs = regs;
     sh_ctxt->ctxt.vendor = v->domain->arch.cpuid->x86_vendor;
-    sh_ctxt->ctxt.swint_emulate = x86_swint_emulate_none;
 
     /* Segment cache initialisation. Primed with CS. */
     creg = hvm_get_seg_reg(x86_seg_cs, sh_ctxt);
diff --git a/xen/arch/x86/x86_emulate/x86_emulate.c b/xen/arch/x86/x86_emulate/x86_emulate.c
index 7af8a42..8c4e885 100644
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -1999,142 +1999,6 @@ static bool umip_active(struct x86_emulate_ctxt *ctxt,
            (cr4 & X86_CR4_UMIP);
 }
 
-/* Inject a software interrupt/exception, emulating if needed. */
-static int inject_swint(enum x86_swint_type type,
-                        uint8_t vector, uint8_t insn_len,
-                        struct x86_emulate_ctxt *ctxt,
-                        const struct x86_emulate_ops *ops)
-{
-    int rc, error_code, fault_type = EXC_GP;
-
-    /*
-     * Without hardware support, injecting software interrupts/exceptions is
-     * problematic.
-     *
-     * All software methods of generating exceptions (other than BOUND) yield
-     * traps, so eip in the exception frame needs to point after the
-     * instruction, not at it.
-     *
-     * However, if injecting it as a hardware exception causes a fault during
-     * delivery, our adjustment of eip will cause the fault to be reported
-     * after the faulting instruction, not pointing to it.
-     *
-     * Therefore, eip can only safely be wound forwards if we are certain that
-     * injecting an equivalent hardware exception won't fault, which means
-     * emulating everything the processor would do on a control transfer.
-     *
-     * However, emulation of complete control transfers is very complicated.
-     * All we care about is that guest userspace cannot avoid the descriptor
-     * DPL check by using the Xen emulator, and successfully invoke DPL=0
-     * descriptors.
-     *
-     * Any OS which would further fault during injection is going to receive a
-     * double fault anyway, and won't be in a position to care that the
-     * faulting eip is incorrect.
-     */
-
-    if ( (ctxt->swint_emulate == x86_swint_emulate_all) ||
-         ((ctxt->swint_emulate == x86_swint_emulate_icebp) &&
-          (type == x86_swint_icebp)) )
-    {
-        if ( !in_realmode(ctxt, ops) )
-        {
-            unsigned int idte_size, idte_offset;
-            struct { uint32_t a, b, c, d; } idte = {};
-            int lm = in_longmode(ctxt, ops);
-
-            if ( lm < 0 )
-                return X86EMUL_UNHANDLEABLE;
-
-            idte_size = lm ? 16 : 8;
-            idte_offset = vector * idte_size;
-
-            /* icebp sets the External Event bit despite being an instruction. */
-            error_code = (vector << 3) | ECODE_IDT |
-                (type == x86_swint_icebp ? ECODE_EXT : 0);
-
-            /*
-             * TODO - this does not cover the v8086 mode with CR4.VME case
-             * correctly, but falls on the safe side from the point of view of
-             * a 32bit OS. Someone with many TUITs can see about reading the
-             * TSS Software Interrupt Redirection bitmap.
-             */
-            if ( (ctxt->regs->eflags & X86_EFLAGS_VM) &&
-                 ((ctxt->regs->eflags & X86_EFLAGS_IOPL) != X86_EFLAGS_IOPL) )
-                goto raise_exn;
-
-            /*
-             * Read all 8/16 bytes so the idtr limit check is applied properly
-             * to this entry, even though we only end up looking at the 2nd
-             * word.
-             */
-            switch ( rc = ops->read(x86_seg_idtr, idte_offset,
-                                    &idte, idte_size, ctxt) )
-            {
-            case X86EMUL_OKAY:
-                break;
-
-            case X86EMUL_EXCEPTION:
-                if ( !ctxt->event_pending )
-                    goto raise_exn;
-                /* fallthrough */
-
-            default:
-                return rc;
-            }
-
-            /* This must be an interrupt, trap, or task gate. */
-#ifdef __XEN__
-            switch ( (idte.b >> 8) & 0x1f )
-            {
-            case SYS_DESC_irq_gate:
-            case SYS_DESC_trap_gate:
-                break;
-            case SYS_DESC_irq_gate16:
-            case SYS_DESC_trap_gate16:
-            case SYS_DESC_task_gate:
-                if ( !lm )
-                    break;
-                /* fall through */
-            default:
-                goto raise_exn;
-            }
-#endif
-
-            /* The 64-bit high half's type must be zero. */
-            if ( idte.d & 0x1f00 )
-                goto raise_exn;
-
-            /* icebp counts as a hardware event, and bypasses the dpl check. */
-            if ( type != x86_swint_icebp )
-            {
-                int cpl = get_cpl(ctxt, ops);
-
-                fail_if(cpl < 0);
-
-                if ( cpl > ((idte.b >> 13) & 3) )
-                    goto raise_exn;
-            }
-
-            /* Is this entry present? */
-            if ( !(idte.b & (1u << 15)) )
-            {
-                fault_type = EXC_NP;
-                goto raise_exn;
-            }
-        }
-    }
-
-    x86_emul_software_event(type, vector, insn_len, ctxt);
-    rc = X86EMUL_OKAY;
-
- done:
-    return rc;
-
- raise_exn:
-    generate_exception(fault_type, error_code);
-}
-
 static void adjust_bnd(struct x86_emulate_ctxt *ctxt,
                        const struct x86_emulate_ops *ops, enum vex_pfx pfx)
 {
@@ -3101,7 +2965,6 @@ x86_emulate(
     struct operand src = { .reg = PTR_POISON };
     struct operand dst = { .reg = PTR_POISON };
     unsigned long cr4;
-    enum x86_swint_type swint_type;
     struct fpu_insn_ctxt fic = { .type = X86EMUL_FPU_none, .exn_raised = -1 };
     struct x86_emulate_stub stub = {};
     DECLARE_ALIGNED(mmval_t, mmval);
@@ -4103,25 +3966,38 @@ x86_emulate(
             goto done;
         break;
 
-    case 0xcc: /* int3 */
-        src.val = EXC_BP;
-        swint_type = x86_swint_int3;
-        goto swint;
-
-    case 0xcd: /* int imm8 */
-        swint_type = x86_swint_int;
-    swint:
-        rc = inject_swint(swint_type, (uint8_t)src.val,
-                          _regs.r(ip) - ctxt->regs->r(ip),
-                          ctxt, ops) ? : X86EMUL_EXCEPTION;
-        goto done;
-
     case 0xce: /* into */
         if ( !(_regs.eflags & X86_EFLAGS_OF) )
             break;
-        src.val = EXC_OF;
-        swint_type = x86_swint_into;
-        goto swint;
+        /* Fallthrough */
+    case 0xcc: /* int3 */
+    case 0xcd: /* int imm1 */
+    case 0xf1: /* int1 (icebp) */
+        ASSERT(!ctxt->event_pending);
+        switch ( ctxt->opcode )
+        {
+        case 0xcc: /* int3 */
+            ctxt->event.vector = EXC_BP;
+            ctxt->event.type = X86_EVENTTYPE_SW_EXCEPTION;
+            break;
+        case 0xcd: /* int imm1 */
+            ctxt->event.vector = src.val;
+            ctxt->event.type = X86_EVENTTYPE_SW_INTERRUPT;
+            break;
+        case 0xce: /* into */
+            ctxt->event.vector = EXC_OF;
+            ctxt->event.type = X86_EVENTTYPE_SW_EXCEPTION;
+            break;
+        case 0xf1: /* icebp */
+            ctxt->event.vector = EXC_DB;
+            ctxt->event.type = X86_EVENTTYPE_PRI_SW_EXCEPTION;
+            break;
+        }
+        ctxt->event.error_code = X86_EVENT_NO_EC;
+        ctxt->event.insn_len = _regs.r(ip) - ctxt->regs->r(ip);
+        ctxt->event_pending = true;
+        rc = X86EMUL_EXCEPTION;
+        goto done;
 
     case 0xcf: /* iret */ {
         unsigned long sel, eip, eflags;
@@ -4782,11 +4658,6 @@ x86_emulate(
             goto done;
         break;
 
-    case 0xf1: /* int1 (icebp) */
-        src.val = EXC_DB;
-        swint_type = x86_swint_icebp;
-        goto swint;
-
     case 0xf4: /* hlt */
         generate_exception_if(!mode_ring0(), EXC_GP, 0);
         ctxt->retire.hlt = true;
diff --git a/xen/arch/x86/x86_emulate/x86_emulate.h b/xen/arch/x86/x86_emulate/x86_emulate.h
index 9c5fcde..215adf6 100644
--- a/xen/arch/x86/x86_emulate/x86_emulate.h
+++ b/xen/arch/x86/x86_emulate/x86_emulate.h
@@ -60,27 +60,6 @@ static inline bool is_x86_system_segment(enum x86_segment seg)
     return seg >= x86_seg_tr && seg < x86_seg_none;
 }
 
-/* Classification of the types of software generated interrupts/exceptions. */
-enum x86_swint_type {
-    x86_swint_icebp, /* 0xf1 */
-    x86_swint_int3,  /* 0xcc */
-    x86_swint_into,  /* 0xce */
-    x86_swint_int,   /* 0xcd $n */
-};
-
-/*
- * How much help is required with software event injection?
- *
- * All software events return from x86_emulate() with X86EMUL_EXCEPTION and
- * fault-like semantics. This just controls whether the emulator performs
- * presence/dpl/etc checks and possibly raises exceptions instead.
- */
-enum x86_swint_emulation {
-    x86_swint_emulate_none,  /* Hardware supports all software injection properly */
-    x86_swint_emulate_icebp, /* Help needed with `icebp` (0xf1) */
-    x86_swint_emulate_all,   /* Help needed with all software events */
-};
-
 /*
  * x86 event types. This enumeration is valid for:
  *  Intel VMX: {VM_ENTRY,VM_EXIT,IDT_VECTORING}_INTR_INFO[10:8]
@@ -472,9 +451,6 @@ struct x86_emulate_ctxt
      * Input-only state:
      */
 
-    /* Software event injection support. */
-    enum x86_swint_emulation swint_emulate;
-
     /* CPU vendor (X86_VENDOR_UNKNOWN for "don't care") */
     unsigned char vendor;
 
@@ -699,35 +675,6 @@ static inline void x86_emul_pagefault(
     ctxt->event_pending = true;
 }
 
-static inline void x86_emul_software_event(
-    enum x86_swint_type type, uint8_t vector, uint8_t insn_len,
-    struct x86_emulate_ctxt *ctxt)
-{
-    ASSERT(!ctxt->event_pending);
-
-    switch ( type )
-    {
-    case x86_swint_icebp:
-        ctxt->event.type = X86_EVENTTYPE_PRI_SW_EXCEPTION;
-        break;
-
-    case x86_swint_int3:
-    case x86_swint_into:
-        ctxt->event.type = X86_EVENTTYPE_SW_EXCEPTION;
-        break;
-
-    case x86_swint_int:
-        ctxt->event.type = X86_EVENTTYPE_SW_INTERRUPT;
-        break;
-    }
-
-    ctxt->event.vector = vector;
-    ctxt->event.error_code = X86_EVENT_NO_EC;
-    ctxt->event.insn_len = insn_len;
-
-    ctxt->event_pending = true;
-}
-
 static inline void x86_emul_reset_event(struct x86_emulate_ctxt *ctxt)
 {
     ctxt->event_pending = false;
-- 
2.1.4


_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel
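[Editor's illustration, not part of the posted patch.]

After this change, every int3 / int $imm / into / icebp that the emulator decodes
simply records a software event in ctxt->event, sets ctxt->event_pending and
returns X86EMUL_EXCEPTION; the presence/DPL checks formerly done by
inject_swint() are left to the caller's hardware-assisted injection path. The
sketch below shows, under stated assumptions, how a caller might consume that
state: it uses only the interface visible in the diff (x86_emulate(),
ctxt->event, ctxt->event_pending, x86_emul_reset_event()), while
deliver_pending_event() is a hypothetical stand-in for the vendor-specific
injection logic referenced in the commit message.

/*
 * Illustrative sketch only.  deliver_pending_event() is a hypothetical
 * placeholder; everything else comes from the emulator interface above.
 */
static int emulate_and_inject(struct x86_emulate_ctxt *ctxt,
                              const struct x86_emulate_ops *ops)
{
    int rc = x86_emulate(ctxt, ops);

    if ( rc == X86EMUL_EXCEPTION && ctxt->event_pending )
    {
        /*
         * ctxt->event carries vector, type (e.g. X86_EVENTTYPE_SW_INTERRUPT),
         * error_code (X86_EVENT_NO_EC for software events) and insn_len,
         * ready to be handed to the hardware injection mechanism.
         */
        deliver_pending_event(&ctxt->event);    /* hypothetical */
        x86_emul_reset_event(ctxt);
        rc = X86EMUL_OKAY;
    }

    return rc;
}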