[Xen-devel] [PATCH RFC v2] x86/emul: Fix the handling of unimplemented Grp7 instructions
Grp7 is abnormally complicated to decode, even by x86's standards, with
{s,l}msw being the problematic cases.

Previously, any value which fell through the first switch statement (looking
for instructions with entirely implicit operands) would be interpreted by the
second switch statement (handling instructions with memory operands).
Unimplemented instructions would then hit the #UD case for having a
non-memory operand, rather than taking the cannot_emulate path.

Place a big if/else around the two switch statements (accounting for {s,l}msw
which need handling in the else clause), so both switch statements can have a
default goto cannot_emulate path.

This fixes the emulation of xend, which would hit the #UD path when it should
complete with no side effects.

Reported-by: Petre Pircalabu <ppircalabu@xxxxxxxxxxxxxxx>
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Petre Pircalabu <ppircalabu@xxxxxxxxxxxxxxx>

v2:
 * Use break rather than goto complete_insn for implicit instructions.
 * Note that we actually fix the behaviour of xend.

RFC as I've only done light testing so far.
---
 xen/arch/x86/x86_emulate/x86_emulate.c | 356 +++++++++++++++++----------------
 1 file changed, 188 insertions(+), 168 deletions(-)

diff --git a/xen/arch/x86/x86_emulate/x86_emulate.c b/xen/arch/x86/x86_emulate/x86_emulate.c
index 2201852..27c7ead 100644
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -4987,197 +4987,217 @@ x86_emulate(
         }
         break;

-    case X86EMUL_OPC(0x0f, 0x01): /* Grp7 */ {
-        unsigned long base, limit, cr0, cr0w;
+    case X86EMUL_OPC(0x0f, 0x01): /* Grp7 */
+    {
+        unsigned long base, limit;

-        switch( modrm )
+        if ( (modrm & 0xc0) == 0xc0 &&
+             (modrm_reg & 7) != 4 /* smsw */ &&
+             (modrm_reg & 7) != 6 /* lmsw */ )
         {
-        case 0xca: /* clac */
-        case 0xcb: /* stac */
-            vcpu_must_have(smap);
-            generate_exception_if(vex.pfx || !mode_ring0(), EXC_UD);
-
-            _regs.eflags &= ~X86_EFLAGS_AC;
-            if ( modrm == 0xcb )
-                _regs.eflags |= X86_EFLAGS_AC;
-            goto complete_insn;
+            switch ( modrm )
+            {
+            case 0xca: /* clac */
+            case 0xcb: /* stac */
+                vcpu_must_have(smap);
+                generate_exception_if(vex.pfx || !mode_ring0(), EXC_UD);
+
+                _regs.eflags &= ~X86_EFLAGS_AC;
+                if ( modrm == 0xcb )
+                    _regs.eflags |= X86_EFLAGS_AC;
+                break;

 #ifdef __XEN__
-        case 0xd1: /* xsetbv */
-            generate_exception_if(vex.pfx, EXC_UD);
-            if ( !ops->read_cr || ops->read_cr(4, &cr4, ctxt) != X86EMUL_OKAY )
-                cr4 = 0;
-            generate_exception_if(!(cr4 & X86_CR4_OSXSAVE), EXC_UD);
-            generate_exception_if(!mode_ring0() ||
-                                  handle_xsetbv(_regs.ecx,
-                                                _regs.eax | (_regs.rdx << 32)),
-                                  EXC_GP, 0);
-            goto complete_insn;
+            case 0xd1: /* xsetbv */
+                generate_exception_if(vex.pfx, EXC_UD);
+                if ( !ops->read_cr ||
+                     ops->read_cr(4, &cr4, ctxt) != X86EMUL_OKAY )
+                    cr4 = 0;
+                generate_exception_if(!(cr4 & X86_CR4_OSXSAVE), EXC_UD);
+                generate_exception_if(!mode_ring0() ||
+                                      handle_xsetbv(_regs.ecx, _regs.eax |
+                                                    (_regs.rdx << 32)),
+                                      EXC_GP, 0);
+                break;
 #endif

-        case 0xd4: /* vmfunc */
-            generate_exception_if(vex.pfx, EXC_UD);
-            fail_if(!ops->vmfunc);
-            if ( (rc = ops->vmfunc(ctxt)) != X86EMUL_OKAY )
-                goto done;
-            goto complete_insn;
+            case 0xd4: /* vmfunc */
+                generate_exception_if(vex.pfx, EXC_UD);
+                fail_if(!ops->vmfunc);
+                if ( (rc = ops->vmfunc(ctxt)) != X86EMUL_OKAY )
+                    goto done;
+                break;

-        case 0xd5: /* xend */
-            generate_exception_if(vex.pfx, EXC_UD);
-            generate_exception_if(!vcpu_has_rtm(), EXC_UD);
-            generate_exception_if(vcpu_has_rtm(), EXC_GP, 0);
-            break;
+            case 0xd5: /* xend */
+                generate_exception_if(vex.pfx, EXC_UD);
+                generate_exception_if(!vcpu_has_rtm(), EXC_UD);
+                generate_exception_if(vcpu_has_rtm(), EXC_GP, 0);
+                break;

-        case 0xd6: /* xtest */
-            generate_exception_if(vex.pfx, EXC_UD);
-            generate_exception_if(!vcpu_has_rtm() && !vcpu_has_hle(),
-                                  EXC_UD);
-            /* Neither HLE nor RTM can be active when we get here. */
-            _regs.eflags |= X86_EFLAGS_ZF;
-            goto complete_insn;
+            case 0xd6: /* xtest */
+                generate_exception_if(vex.pfx, EXC_UD);
+                generate_exception_if(!vcpu_has_rtm() && !vcpu_has_hle(),
+                                      EXC_UD);
+                /* Neither HLE nor RTM can be active when we get here. */
+                _regs.eflags |= X86_EFLAGS_ZF;
+                break;

-        case 0xdf: /* invlpga */
-            generate_exception_if(!in_protmode(ctxt, ops), EXC_UD);
-            generate_exception_if(!mode_ring0(), EXC_GP, 0);
-            fail_if(ops->invlpg == NULL);
-            if ( (rc = ops->invlpg(x86_seg_none, truncate_ea(_regs.r(ax)),
-                                   ctxt)) )
-                goto done;
-            goto complete_insn;
+            case 0xdf: /* invlpga */
+                generate_exception_if(!in_protmode(ctxt, ops), EXC_UD);
+                generate_exception_if(!mode_ring0(), EXC_GP, 0);
+                fail_if(ops->invlpg == NULL);
+                if ( (rc = ops->invlpg(x86_seg_none, truncate_ea(_regs.r(ax)),
+                                       ctxt)) )
+                    goto done;
+                break;

-        case 0xf9: /* rdtscp */
-            fail_if(ops->read_msr == NULL);
-            if ( (rc = ops->read_msr(MSR_TSC_AUX,
-                                     &msr_val, ctxt)) != X86EMUL_OKAY )
-                goto done;
-            _regs.r(cx) = (uint32_t)msr_val;
-            goto rdtsc;
+            case 0xf9: /* rdtscp */
+                fail_if(ops->read_msr == NULL);
+                if ( (rc = ops->read_msr(MSR_TSC_AUX,
+                                         &msr_val, ctxt)) != X86EMUL_OKAY )
+                    goto done;
+                _regs.r(cx) = (uint32_t)msr_val;
+                goto rdtsc;
+
+            case 0xfc: /* clzero */
+            {
+                unsigned long zero = 0;
+
+                vcpu_must_have(clzero);
+
+                base = ad_bytes == 8 ? _regs.r(ax) :
+                       ad_bytes == 4 ? _regs.eax : _regs.ax;
+                limit = 0;
+                if ( vcpu_has_clflush() &&
+                     ops->cpuid(1, 0, &cpuid_leaf, ctxt) == X86EMUL_OKAY )
+                    limit = ((cpuid_leaf.b >> 8) & 0xff) * 8;
+                generate_exception_if(limit < sizeof(long) ||
+                                      (limit & (limit - 1)), EXC_UD);
+                base &= ~(limit - 1);
+                if ( ops->rep_stos )
+                {
+                    unsigned long nr_reps = limit / sizeof(zero);
+
+                    rc = ops->rep_stos(&zero, ea.mem.seg, base, sizeof(zero),
+                                       &nr_reps, ctxt);
+                    if ( rc == X86EMUL_OKAY )
+                    {
+                        base += nr_reps * sizeof(zero);
+                        limit -= nr_reps * sizeof(zero);
+                    }
+                    else if ( rc != X86EMUL_UNHANDLEABLE )
+                        goto done;
+                }
+                fail_if(limit && !ops->write);
+                while ( limit )
+                {
+                    rc = ops->write(ea.mem.seg, base, &zero,
+                                    sizeof(zero), ctxt);
+                    if ( rc != X86EMUL_OKAY )
+                        goto done;
+                    base += sizeof(zero);
+                    limit -= sizeof(zero);
+                }
+                break;
+            }

-        case 0xfc: /* clzero */
+            default:
+                goto cannot_emulate;
+            }
+        }
+        else
         {
-            unsigned long zero = 0;
+            unsigned long cr0, cr0w;

-            vcpu_must_have(clzero);
+            seg = (modrm_reg & 1) ? x86_seg_idtr : x86_seg_gdtr;

-            base = ad_bytes == 8 ? _regs.r(ax) :
-                   ad_bytes == 4 ? _regs.eax : _regs.ax;
-            limit = 0;
-            if ( vcpu_has_clflush() &&
-                 ops->cpuid(1, 0, &cpuid_leaf, ctxt) == X86EMUL_OKAY )
-                limit = ((cpuid_leaf.b >> 8) & 0xff) * 8;
-            generate_exception_if(limit < sizeof(long) ||
-                                  (limit & (limit - 1)), EXC_UD);
-            base &= ~(limit - 1);
-            if ( ops->rep_stos )
+            switch ( modrm_reg & 7 )
             {
-                unsigned long nr_reps = limit / sizeof(zero);
+            case 0: /* sgdt */
+            case 1: /* sidt */
+                generate_exception_if(ea.type != OP_MEM, EXC_UD);
+                generate_exception_if(umip_active(ctxt, ops), EXC_GP, 0);
+                fail_if(!ops->read_segment || !ops->write);
+                if ( (rc = ops->read_segment(seg, &sreg, ctxt)) )
+                    goto done;
+                if ( mode_64bit() )
+                    op_bytes = 8;
+                else if ( op_bytes == 2 )
+                {
+                    sreg.base &= 0xffffff;
+                    op_bytes = 4;
+                }
+                if ( (rc = ops->write(ea.mem.seg, ea.mem.off, &sreg.limit,
+                                      2, ctxt)) != X86EMUL_OKAY ||
+                     (rc = ops->write(ea.mem.seg, ea.mem.off + 2, &sreg.base,
+                                      op_bytes, ctxt)) != X86EMUL_OKAY )
+                    goto done;
+                break;
+
+            case 2: /* lgdt */
+            case 3: /* lidt */
+                generate_exception_if(!mode_ring0(), EXC_GP, 0);
+                generate_exception_if(ea.type != OP_MEM, EXC_UD);
+                fail_if(ops->write_segment == NULL);
+                memset(&sreg, 0, sizeof(sreg));
+                if ( (rc = read_ulong(ea.mem.seg, ea.mem.off+0,
+                                      &limit, 2, ctxt, ops)) ||
+                     (rc = read_ulong(ea.mem.seg, ea.mem.off+2,
+                                      &base, mode_64bit() ? 8 : 4, ctxt, ops)) )
+                    goto done;
+                generate_exception_if(!is_canonical_address(base), EXC_GP, 0);
+                sreg.base = base;
+                sreg.limit = limit;
+                if ( !mode_64bit() && op_bytes == 2 )
+                    sreg.base &= 0xffffff;
+                if ( (rc = ops->write_segment(seg, &sreg, ctxt)) )
+                    goto done;
+                break;

-                rc = ops->rep_stos(&zero, ea.mem.seg, base, sizeof(zero),
-                                   &nr_reps, ctxt);
-                if ( rc == X86EMUL_OKAY )
+            case 4: /* smsw */
+                generate_exception_if(umip_active(ctxt, ops), EXC_GP, 0);
+                if ( ea.type == OP_MEM )
                 {
-                    base += nr_reps * sizeof(zero);
-                    limit -= nr_reps * sizeof(zero);
+                    fail_if(!ops->write);
+                    d |= Mov; /* force writeback */
+                    ea.bytes = 2;
                 }
-                else if ( rc != X86EMUL_UNHANDLEABLE )
+                else
+                    ea.bytes = op_bytes;
+                dst = ea;
+                fail_if(ops->read_cr == NULL);
+                if ( (rc = ops->read_cr(0, &dst.val, ctxt)) )
                     goto done;
-            }
-            fail_if(limit && !ops->write);
-            while ( limit )
-            {
-                rc = ops->write(ea.mem.seg, base, &zero, sizeof(zero), ctxt);
-                if ( rc != X86EMUL_OKAY )
+                break;
+
+            case 6: /* lmsw */
+                fail_if(ops->read_cr == NULL);
+                fail_if(ops->write_cr == NULL);
+                generate_exception_if(!mode_ring0(), EXC_GP, 0);
+                if ( (rc = ops->read_cr(0, &cr0, ctxt)) )
                     goto done;
-                base += sizeof(zero);
-                limit -= sizeof(zero);
-            }
-            goto complete_insn;
-        }
-        }
+                if ( ea.type == OP_REG )
+                    cr0w = *ea.reg;
+                else if ( (rc = read_ulong(ea.mem.seg, ea.mem.off,
+                                           &cr0w, 2, ctxt, ops)) )
+                    goto done;
+                /* LMSW can: (1) set bits 0-3; (2) clear bits 1-3. */
+                cr0 = (cr0 & ~0xe) | (cr0w & 0xf);
+                if ( (rc = ops->write_cr(0, cr0, ctxt)) )
+                    goto done;
+                break;

-        seg = (modrm_reg & 1) ? x86_seg_idtr : x86_seg_gdtr;
+            case 7: /* invlpg */
+                generate_exception_if(!mode_ring0(), EXC_GP, 0);
+                generate_exception_if(ea.type != OP_MEM, EXC_UD);
+                fail_if(ops->invlpg == NULL);
+                if ( (rc = ops->invlpg(ea.mem.seg, ea.mem.off, ctxt)) )
+                    goto done;
+                break;

-        switch ( modrm_reg & 7 )
-        {
-        case 0: /* sgdt */
-        case 1: /* sidt */
-            generate_exception_if(ea.type != OP_MEM, EXC_UD);
-            generate_exception_if(umip_active(ctxt, ops), EXC_GP, 0);
-            fail_if(!ops->read_segment || !ops->write);
-            if ( (rc = ops->read_segment(seg, &sreg, ctxt)) )
-                goto done;
-            if ( mode_64bit() )
-                op_bytes = 8;
-            else if ( op_bytes == 2 )
-            {
-                sreg.base &= 0xffffff;
-                op_bytes = 4;
-            }
-            if ( (rc = ops->write(ea.mem.seg, ea.mem.off, &sreg.limit,
-                                  2, ctxt)) != X86EMUL_OKAY ||
-                 (rc = ops->write(ea.mem.seg, ea.mem.off + 2, &sreg.base,
-                                  op_bytes, ctxt)) != X86EMUL_OKAY )
-                goto done;
-            break;
-        case 2: /* lgdt */
-        case 3: /* lidt */
-            generate_exception_if(!mode_ring0(), EXC_GP, 0);
-            generate_exception_if(ea.type != OP_MEM, EXC_UD);
-            fail_if(ops->write_segment == NULL);
-            memset(&sreg, 0, sizeof(sreg));
-            if ( (rc = read_ulong(ea.mem.seg, ea.mem.off+0,
-                                  &limit, 2, ctxt, ops)) ||
-                 (rc = read_ulong(ea.mem.seg, ea.mem.off+2,
-                                  &base, mode_64bit() ? 8 : 4, ctxt, ops)) )
-                goto done;
-            generate_exception_if(!is_canonical_address(base), EXC_GP, 0);
-            sreg.base = base;
-            sreg.limit = limit;
-            if ( !mode_64bit() && op_bytes == 2 )
-                sreg.base &= 0xffffff;
-            if ( (rc = ops->write_segment(seg, &sreg, ctxt)) )
-                goto done;
-            break;
-        case 4: /* smsw */
-            generate_exception_if(umip_active(ctxt, ops), EXC_GP, 0);
-            if ( ea.type == OP_MEM )
-            {
-                fail_if(!ops->write);
-                d |= Mov; /* force writeback */
-                ea.bytes = 2;
+            default:
+                goto cannot_emulate;
             }
-            else
-                ea.bytes = op_bytes;
-            dst = ea;
-            fail_if(ops->read_cr == NULL);
-            if ( (rc = ops->read_cr(0, &dst.val, ctxt)) )
-                goto done;
-            break;
-        case 6: /* lmsw */
-            fail_if(ops->read_cr == NULL);
-            fail_if(ops->write_cr == NULL);
-            generate_exception_if(!mode_ring0(), EXC_GP, 0);
-            if ( (rc = ops->read_cr(0, &cr0, ctxt)) )
-                goto done;
-            if ( ea.type == OP_REG )
-                cr0w = *ea.reg;
-            else if ( (rc = read_ulong(ea.mem.seg, ea.mem.off,
-                                       &cr0w, 2, ctxt, ops)) )
-                goto done;
-            /* LMSW can: (1) set bits 0-3; (2) clear bits 1-3. */
-            cr0 = (cr0 & ~0xe) | (cr0w & 0xf);
-            if ( (rc = ops->write_cr(0, cr0, ctxt)) )
-                goto done;
-            break;
-        case 7: /* invlpg */
-            generate_exception_if(!mode_ring0(), EXC_GP, 0);
-            generate_exception_if(ea.type != OP_MEM, EXC_UD);
-            fail_if(ops->invlpg == NULL);
-            if ( (rc = ops->invlpg(ea.mem.seg, ea.mem.off, ctxt)) )
-                goto done;
-            break;
-        default:
-            goto cannot_emulate;
-        }
+        }
         break;
     }
-- 
2.1.4
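The control-flow shape the patch arrives at can be sketched in isolation as follows. This is a stripped-down illustration rather than the emulator code itself: grp7_shape(), the rc enumeration and the reduced opcode handling are hypothetical stand-ins, and all of the real decode, privilege and fault checks are omitted. Register-only encodings (mod == 3, excluding the smsw/lmsw register forms) go through one switch, everything else through a second switch, and each switch carries its own default, so an unrecognised encoding is reported as unemulatable instead of falling into the memory-operand #UD checks.

/*
 * Stand-alone sketch of the two-switch Grp7 dispatch described above.
 * Hypothetical names; the real x86_emulate() logic is far more involved.
 */
#include <stdint.h>
#include <stdio.h>

enum rc { RC_OKAY, RC_CANNOT_EMULATE };

static enum rc grp7_shape(uint8_t modrm)
{
    unsigned int modrm_reg = (modrm >> 3) & 7;

    if ( (modrm & 0xc0) == 0xc0 &&
         modrm_reg != 4 /* smsw */ &&
         modrm_reg != 6 /* lmsw */ )
    {
        /* Register-only forms with entirely implicit operands. */
        switch ( modrm )
        {
        case 0xca: /* clac */
        case 0xcb: /* stac */
        case 0xd5: /* xend */
        case 0xd6: /* xtest */
            return RC_OKAY;

        default:                      /* e.g. 0xd8 (vmrun), not handled */
            return RC_CANNOT_EMULATE; /* rather than a spurious #UD */
        }
    }
    else
    {
        /* Memory-operand forms, plus the smsw/lmsw register forms. */
        switch ( modrm_reg )
        {
        case 0: /* sgdt */
        case 1: /* sidt */
        case 2: /* lgdt */
        case 3: /* lidt */
        case 4: /* smsw */
        case 6: /* lmsw */
        case 7: /* invlpg */
            return RC_OKAY;

        default:
            return RC_CANNOT_EMULATE;
        }
    }
}

int main(void)
{
    printf("xend (0xd5): %s\n",
           grp7_shape(0xd5) == RC_OKAY ? "handled" : "cannot emulate");
    printf("0xd8       : %s\n",
           grp7_shape(0xd8) == RC_OKAY ? "handled" : "cannot emulate");
    return 0;
}

Compiled on its own, the sketch reports xend as handled and the unimplemented 0xd8 encoding as "cannot emulate", mirroring the behaviour the commit message describes for the real emulator.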