Re: [Xen-devel] [PATCH 02/17] x86emul: re-order cases of main switch statement
On Wed, Jun 21, 2017 at 12:59 PM, Jan Beulich <JBeulich@xxxxxxxx> wrote:
> Restore the intended numerical ordering, which has mostly been
> violated by incremental additions, where moving around bigger chunks
> did not seem advisable. One exception, though, at the very top of the
> switch(): keeping the arithmetic ops together seems preferable over
> entirely strict ordering.
+1
> Additionally, move a few macro definitions ahead of their first uses
> (the current placement is benign as long as those uses themselves sit
> only inside other macro definitions, but that's going to change once
> those macros have helpers broken out).
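A quick illustration of the placement point, since it trips people up: a
macro body only expands at its point of use, so a macro may freely reference
another macro that is defined later in the file. A minimal sketch (names
purely illustrative):

    #define OUTER() INNER()  /* fine: INNER() expands only where OUTER() is used */
    #define INNER() 42

    int x = OUTER();         /* both definitions are visible by this point */

Once INNER() grows a broken-out helper *function*, however, that helper's
declaration must precede OUTER()'s first use, hence moving the definitions
up front.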
>
> No (intended) functional change.
>
> Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
As far as I can tell, the 'no functional change' claim holds:
Reviewed-by: George Dunlap <george.dunlap@xxxxxxxxxx>
>
> --- a/xen/arch/x86/x86_emulate/x86_emulate.c
> +++ b/xen/arch/x86/x86_emulate/x86_emulate.c
> @@ -843,6 +843,27 @@ do{ asm volatile (
> #define __emulate_1op_8byte(_op, _dst, _eflags)
> #endif /* __i386__ */
>
> +#define fail_if(p) \
> +do { \
> + rc = (p) ? X86EMUL_UNHANDLEABLE : X86EMUL_OKAY; \
> + if ( rc ) goto done; \
> +} while (0)
> +
> +static inline int mkec(uint8_t e, int32_t ec, ...)
> +{
> + return (e < 32 && ((1u << e) & EXC_HAS_EC)) ? ec : X86_EVENT_NO_EC;
> +}
> +
> +#define generate_exception_if(p, e, ec...) \
> +({ if ( (p) ) { \
> + x86_emul_hw_exception(e, mkec(e, ##ec, 0), ctxt); \
> + rc = X86EMUL_EXCEPTION; \
> + goto done; \
> + } \
> +})
> +
> +#define generate_exception(e, ec...) generate_exception_if(true, e, ##ec)
> +
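For readers following along, here is a standalone, compilable sketch of the
control-flow pattern these three helpers implement. The constants and the
demo handler below are re-declared purely for illustration; in the real
file, rc, ctxt and the done: label belong to x86_emulate(), and
x86_emul_hw_exception() records the event instead of printing:

    #include <stdint.h>
    #include <stdio.h>

    enum { X86EMUL_OKAY, X86EMUL_UNHANDLEABLE, X86EMUL_EXCEPTION };
    #define EXC_GP 13
    #define X86_EVENT_NO_EC (-1)
    /* Vectors 8, 10-14 and 17 push an error code (architectural fact). */
    #define EXC_HAS_EC ((1u << 8) | (1u << 10) | (1u << 11) | (1u << 12) | \
                        (1u << 13) | (1u << 14) | (1u << 17))

    /*
     * The variadic trick: call sites may pass an error code or omit it.
     * generate_exception_if() appends a trailing 0, so mkec() always has a
     * second argument; for vectors without an error code that argument is
     * ignored and X86_EVENT_NO_EC is returned instead.
     */
    static inline int mkec(uint8_t e, int32_t ec, ...)
    {
        return (e < 32 && ((1u << e) & EXC_HAS_EC)) ? ec : X86_EVENT_NO_EC;
    }

    static int demo_handler(int unsupported, int faulting)
    {
        int rc = X86EMUL_OKAY;

        /* What fail_if(unsupported) expands to: */
        rc = unsupported ? X86EMUL_UNHANDLEABLE : X86EMUL_OKAY;
        if ( rc ) goto done;

        /* What generate_exception_if(faulting, EXC_GP, 0) expands to: */
        if ( faulting )
        {
            printf("inject vector %d, error code %d\n",   /* stand-in for */
                   EXC_GP, mkec(EXC_GP, 0, 0));           /* x86_emul_hw_exception() */
            rc = X86EMUL_EXCEPTION;
            goto done;
        }

     done:
        return rc;
    }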
> #ifdef __XEN__
> # define invoke_stub(pre, post, constraints...) do { \
> union stub_exception_token res_ = { .raw = ~0 }; \
> @@ -911,27 +932,6 @@ do{ asm volatile (
> # define mode_64bit() false
> #endif
>
> -#define fail_if(p) \
> -do { \
> - rc = (p) ? X86EMUL_UNHANDLEABLE : X86EMUL_OKAY; \
> - if ( rc ) goto done; \
> -} while (0)
> -
> -static inline int mkec(uint8_t e, int32_t ec, ...)
> -{
> - return (e < 32 && ((1u << e) & EXC_HAS_EC)) ? ec : X86_EVENT_NO_EC;
> -}
> -
> -#define generate_exception_if(p, e, ec...) \
> -({ if ( (p) ) { \
> - x86_emul_hw_exception(e, mkec(e, ##ec, 0), ctxt); \
> - rc = X86EMUL_EXCEPTION; \
> - goto done; \
> - } \
> -})
> -
> -#define generate_exception(e, ec...) generate_exception_if(true, e, ##ec)
> -
> /*
> * Given byte has even parity (even number of 1s)? SDM Vol. 1 Sec. 3.4.3.1,
> * "Status Flags": EFLAGS.PF reflects parity of least-sig. byte of result
> only.
> @@ -3596,6 +3596,11 @@ x86_emulate(
> dst.bytes = 2;
> break;
>
> + case 0x8d: /* lea */
> + generate_exception_if(ea.type != OP_MEM, EXC_UD);
> + dst.val = ea.mem.off;
> + break;
> +
> case 0x8e: /* mov r/m,Sreg */
> seg = modrm_reg & 7; /* REX.R is ignored. */
> generate_exception_if(!is_x86_user_segment(seg) ||
> @@ -3607,11 +3612,6 @@ x86_emulate(
> dst.type = OP_NONE;
> break;
>
> - case 0x8d: /* lea */
> - generate_exception_if(ea.type != OP_MEM, EXC_UD);
> - dst.val = ea.mem.off;
> - break;
> -
> case 0x8f: /* pop (sole member of Grp1a) */
> generate_exception_if((modrm_reg & 7) != 0, EXC_UD);
> /* 64-bit mode: POP defaults to a 64-bit operand. */
> @@ -5746,12 +5746,6 @@ x86_emulate(
> _regs.r(ax) = (uint32_t)msr_val;
> break;
>
> - case X86EMUL_OPC(0x0f, 0x40) ... X86EMUL_OPC(0x0f, 0x4f): /* cmovcc */
> - vcpu_must_have(cmov);
> - if ( test_cc(b, _regs.eflags) )
> - dst.val = src.val;
> - break;
> -
> case X86EMUL_OPC(0x0f, 0x34): /* sysenter */
> vcpu_must_have(sep);
> generate_exception_if(mode_ring0(), EXC_GP, 0);
> @@ -5834,6 +5828,12 @@ x86_emulate(
> singlestep = _regs.eflags & X86_EFLAGS_TF;
> break;
>
> + case X86EMUL_OPC(0x0f, 0x40) ... X86EMUL_OPC(0x0f, 0x4f): /* cmovcc */
> + vcpu_must_have(cmov);
> + if ( test_cc(b, _regs.eflags) )
> + dst.val = src.val;
> + break;
> +
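In case test_cc() is unfamiliar: cmovcc, jcc and setcc all encode their
condition in the low opcode nibble, with bit 0 inverting the sense. Below is
a self-contained sketch mirroring the architectural condition table from the
SDM; it is written from scratch for illustration and is not the file's
actual test_cc() implementation:

    #include <stdbool.h>
    #include <stdint.h>

    #define X86_EFLAGS_CF 0x0001u
    #define X86_EFLAGS_PF 0x0004u
    #define X86_EFLAGS_ZF 0x0040u
    #define X86_EFLAGS_SF 0x0080u
    #define X86_EFLAGS_OF 0x0800u

    static bool eval_cc(uint8_t cc, uint32_t flags)  /* cc: low opcode nibble */
    {
        bool rc = false;

        switch ( (cc >> 1) & 7 )
        {
        case 0: rc = flags & X86_EFLAGS_OF; break;                   /* o  / no */
        case 1: rc = flags & X86_EFLAGS_CF; break;                   /* b  / nb */
        case 2: rc = flags & X86_EFLAGS_ZF; break;                   /* z  / nz */
        case 3: rc = flags & (X86_EFLAGS_CF | X86_EFLAGS_ZF); break; /* be / a  */
        case 4: rc = flags & X86_EFLAGS_SF; break;                   /* s  / ns */
        case 5: rc = flags & X86_EFLAGS_PF; break;                   /* p  / np */
        case 6: /* l / ge: SF != OF */
            rc = !(flags & X86_EFLAGS_SF) != !(flags & X86_EFLAGS_OF);
            break;
        case 7: /* le / g: ZF || (SF != OF) */
            rc = (flags & X86_EFLAGS_ZF) ||
                 !(flags & X86_EFLAGS_SF) != !(flags & X86_EFLAGS_OF);
            break;
        }

        return rc ^ (cc & 1);  /* odd condition numbers invert the result */
    }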
> CASE_SIMD_PACKED_FP(, 0x0f, 0x50): /* movmskp{s,d} xmm,reg */
> CASE_SIMD_PACKED_FP(_VEX, 0x0f, 0x50): /* vmovmskp{s,d} {x,y}mm,reg */
> CASE_SIMD_PACKED_INT(0x0f, 0xd7): /* pmovmskb {,x}mm,reg */
> @@ -6050,10 +6050,6 @@ x86_emulate(
> get_fpu(X86EMUL_FPU_mmx, &fic);
> goto simd_0f_common;
>
> - case X86EMUL_OPC_VEX_66(0x0f38, 0x41): /* vphminposuw xmm/m128,xmm,xmm */
> - generate_exception_if(vex.l, EXC_UD);
> - goto simd_0f_avx;
> -
> CASE_SIMD_PACKED_INT(0x0f, 0x6e): /* mov{d,q} r/m,{,x}mm */
> case X86EMUL_OPC_VEX_66(0x0f, 0x6e): /* vmov{d,q} r/m,xmm */
> CASE_SIMD_PACKED_INT(0x0f, 0x7e): /* mov{d,q} {,x}mm,r/m */
> @@ -6351,12 +6347,6 @@ x86_emulate(
> op_bytes = 8;
> goto simd_0f_xmm;
>
> - case X86EMUL_OPC_F3(0x0f, 0x7e): /* movq xmm/m64,xmm */
> - case X86EMUL_OPC_VEX_F3(0x0f, 0x7e): /* vmovq xmm/m64,xmm */
> - generate_exception_if(vex.l, EXC_UD);
> - op_bytes = 8;
> - goto simd_0f_int;
> -
> case X86EMUL_OPC_F2(0x0f, 0xf0): /* lddqu m128,xmm */
> case X86EMUL_OPC_VEX_F2(0x0f, 0xf0): /* vlddqu mem,{x,y}mm */
> generate_exception_if(ea.type != OP_MEM, EXC_UD);
> @@ -6376,6 +6366,12 @@ x86_emulate(
> op_bytes = 16 << vex.l;
> goto simd_0f_sse3_avx;
>
> + case X86EMUL_OPC_F3(0x0f, 0x7e): /* movq xmm/m64,xmm */
> + case X86EMUL_OPC_VEX_F3(0x0f, 0x7e): /* vmovq xmm/m64,xmm */
> + generate_exception_if(vex.l, EXC_UD);
> + op_bytes = 8;
> + goto simd_0f_int;
> +
> case X86EMUL_OPC(0x0f, 0x80) ... X86EMUL_OPC(0x0f, 0x8f): /* jcc (near) */
> if ( test_cc(b, _regs.eflags) )
> jmp_rel((int32_t)src.val);
> @@ -7134,39 +7130,6 @@ x86_emulate(
> generate_exception_if(vex.w, EXC_UD);
> goto simd_0f_avx;
>
> - case X86EMUL_OPC_66(0x0f38, 0x20): /* pmovsxbw xmm/m64,xmm */
> - case X86EMUL_OPC_66(0x0f38, 0x21): /* pmovsxbd xmm/m32,xmm */
> - case X86EMUL_OPC_66(0x0f38, 0x22): /* pmovsxbq xmm/m16,xmm */
> - case X86EMUL_OPC_66(0x0f38, 0x23): /* pmovsxwd xmm/m64,xmm */
> - case X86EMUL_OPC_66(0x0f38, 0x24): /* pmovsxwq xmm/m32,xmm */
> - case X86EMUL_OPC_66(0x0f38, 0x25): /* pmovsxdq xmm/m64,xmm */
> - case X86EMUL_OPC_66(0x0f38, 0x30): /* pmovzxbw xmm/m64,xmm */
> - case X86EMUL_OPC_66(0x0f38, 0x31): /* pmovzxbd xmm/m32,xmm */
> - case X86EMUL_OPC_66(0x0f38, 0x32): /* pmovzxbq xmm/m16,xmm */
> - case X86EMUL_OPC_66(0x0f38, 0x33): /* pmovzxwd xmm/m64,xmm */
> - case X86EMUL_OPC_66(0x0f38, 0x34): /* pmovzxwq xmm/m32,xmm */
> - case X86EMUL_OPC_66(0x0f38, 0x35): /* pmovzxdq xmm/m64,xmm */
> - op_bytes = 16 >> pmov_convert_delta[b & 7];
> - /* fall through */
> - case X86EMUL_OPC_66(0x0f38, 0x10): /* pblendvb XMM0,xmm/m128,xmm */
> - case X86EMUL_OPC_66(0x0f38, 0x14): /* blendvps XMM0,xmm/m128,xmm */
> - case X86EMUL_OPC_66(0x0f38, 0x15): /* blendvpd XMM0,xmm/m128,xmm */
> - case X86EMUL_OPC_66(0x0f38, 0x28): /* pmuldq xmm/m128,xmm */
> - case X86EMUL_OPC_66(0x0f38, 0x29): /* pcmpeqq xmm/m128,xmm */
> - case X86EMUL_OPC_66(0x0f38, 0x2b): /* packusdw xmm/m128,xmm */
> - case X86EMUL_OPC_66(0x0f38, 0x38): /* pminsb xmm/m128,xmm */
> - case X86EMUL_OPC_66(0x0f38, 0x39): /* pminsd xmm/m128,xmm */
> - case X86EMUL_OPC_66(0x0f38, 0x3a): /* pminub xmm/m128,xmm */
> - case X86EMUL_OPC_66(0x0f38, 0x3b): /* pminud xmm/m128,xmm */
> - case X86EMUL_OPC_66(0x0f38, 0x3c): /* pmaxsb xmm/m128,xmm */
> - case X86EMUL_OPC_66(0x0f38, 0x3d): /* pmaxsd xmm/m128,xmm */
> - case X86EMUL_OPC_66(0x0f38, 0x3e): /* pmaxub xmm/m128,xmm */
> - case X86EMUL_OPC_66(0x0f38, 0x3f): /* pmaxud xmm/m128,xmm */
> - case X86EMUL_OPC_66(0x0f38, 0x40): /* pmulld xmm/m128,xmm */
> - case X86EMUL_OPC_66(0x0f38, 0x41): /* phminposuw xmm/m128,xmm */
> - host_and_vcpu_must_have(sse4_1);
> - goto simd_0f38_common;
> -
> case X86EMUL_OPC_VEX_66(0x0f38, 0x0e): /* vtestps {x,y}mm/mem,{x,y}mm */
> case X86EMUL_OPC_VEX_66(0x0f38, 0x0f): /* vtestpd {x,y}mm/mem,{x,y}mm */
> generate_exception_if(vex.w, EXC_UD);
> @@ -7220,6 +7183,39 @@ x86_emulate(
> dst.type = OP_NONE;
> break;
>
> + case X86EMUL_OPC_66(0x0f38, 0x20): /* pmovsxbw xmm/m64,xmm */
> + case X86EMUL_OPC_66(0x0f38, 0x21): /* pmovsxbd xmm/m32,xmm */
> + case X86EMUL_OPC_66(0x0f38, 0x22): /* pmovsxbq xmm/m16,xmm */
> + case X86EMUL_OPC_66(0x0f38, 0x23): /* pmovsxwd xmm/m64,xmm */
> + case X86EMUL_OPC_66(0x0f38, 0x24): /* pmovsxwq xmm/m32,xmm */
> + case X86EMUL_OPC_66(0x0f38, 0x25): /* pmovsxdq xmm/m64,xmm */
> + case X86EMUL_OPC_66(0x0f38, 0x30): /* pmovzxbw xmm/m64,xmm */
> + case X86EMUL_OPC_66(0x0f38, 0x31): /* pmovzxbd xmm/m32,xmm */
> + case X86EMUL_OPC_66(0x0f38, 0x32): /* pmovzxbq xmm/m16,xmm */
> + case X86EMUL_OPC_66(0x0f38, 0x33): /* pmovzxwd xmm/m64,xmm */
> + case X86EMUL_OPC_66(0x0f38, 0x34): /* pmovzxwq xmm/m32,xmm */
> + case X86EMUL_OPC_66(0x0f38, 0x35): /* pmovzxdq xmm/m64,xmm */
> + op_bytes = 16 >> pmov_convert_delta[b & 7];
> + /* fall through */
> + case X86EMUL_OPC_66(0x0f38, 0x10): /* pblendvb XMM0,xmm/m128,xmm */
> + case X86EMUL_OPC_66(0x0f38, 0x14): /* blendvps XMM0,xmm/m128,xmm */
> + case X86EMUL_OPC_66(0x0f38, 0x15): /* blendvpd XMM0,xmm/m128,xmm */
> + case X86EMUL_OPC_66(0x0f38, 0x28): /* pmuldq xmm/m128,xmm */
> + case X86EMUL_OPC_66(0x0f38, 0x29): /* pcmpeqq xmm/m128,xmm */
> + case X86EMUL_OPC_66(0x0f38, 0x2b): /* packusdw xmm/m128,xmm */
> + case X86EMUL_OPC_66(0x0f38, 0x38): /* pminsb xmm/m128,xmm */
> + case X86EMUL_OPC_66(0x0f38, 0x39): /* pminsd xmm/m128,xmm */
> + case X86EMUL_OPC_66(0x0f38, 0x3a): /* pminub xmm/m128,xmm */
> + case X86EMUL_OPC_66(0x0f38, 0x3b): /* pminud xmm/m128,xmm */
> + case X86EMUL_OPC_66(0x0f38, 0x3c): /* pmaxsb xmm/m128,xmm */
> + case X86EMUL_OPC_66(0x0f38, 0x3d): /* pmaxsd xmm/m128,xmm */
> + case X86EMUL_OPC_66(0x0f38, 0x3e): /* pmaxub xmm/m128,xmm */
> + case X86EMUL_OPC_66(0x0f38, 0x3f): /* pmaxud xmm/m128,xmm */
> + case X86EMUL_OPC_66(0x0f38, 0x40): /* pmulld xmm/m128,xmm */
> + case X86EMUL_OPC_66(0x0f38, 0x41): /* phminposuw xmm/m128,xmm */
> + host_and_vcpu_must_have(sse4_1);
> + goto simd_0f38_common;
> +
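One remark on the op_bytes computation at the top of this block, as it is
easy to gloss over: the pmovsx/pmovzx forms widen their source elements by a
factor of 2, 4 or 8, so only 16 >> delta bytes of memory are read, which is
what the m64/m32/m16 forms in the comments reflect. The lookup keys on the
low three opcode bits. A hypothetical reconstruction of such a table (the
real pmov_convert_delta[] is defined earlier in the file and may differ in
form):

    /* Hypothetical reconstruction: log2 of the widening factor per opcode. */
    static const uint8_t pmov_convert_delta[] = {
        1, 2, 3,  /* 0x20/0x30: bw (x2), 0x21/0x31: bd (x4), 0x22/0x32: bq (x8) */
        1, 2,     /* 0x23/0x33: wd (x2), 0x24/0x34: wq (x4) */
        1,        /* 0x25/0x35: dq (x2) */
    };

For pmovsxbw (0x20), for example, b & 7 == 0 gives op_bytes = 16 >> 1 = 8,
matching the xmm/m64 form noted above.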
> case X86EMUL_OPC_VEX_66(0x0f38, 0x20): /* vpmovsxbw xmm/mem,{x,y}mm */
> case X86EMUL_OPC_VEX_66(0x0f38, 0x21): /* vpmovsxbd xmm/mem,{x,y}mm */
> case X86EMUL_OPC_VEX_66(0x0f38, 0x22): /* vpmovsxbq xmm/mem,{x,y}mm */
> @@ -7318,16 +7314,6 @@ x86_emulate(
> host_and_vcpu_must_have(sse4_2);
> goto simd_0f38_common;
>
> - case X86EMUL_OPC(0x0f38, 0xc8): /* sha1nexte xmm/m128,xmm */
> - case X86EMUL_OPC(0x0f38, 0xc9): /* sha1msg1 xmm/m128,xmm */
> - case X86EMUL_OPC(0x0f38, 0xca): /* sha1msg2 xmm/m128,xmm */
> - case X86EMUL_OPC(0x0f38, 0xcb): /* sha256rnds2 XMM0,xmm/m128,xmm */
> - case X86EMUL_OPC(0x0f38, 0xcc): /* sha256msg1 xmm/m128,xmm */
> - case X86EMUL_OPC(0x0f38, 0xcd): /* sha256msg2 xmm/m128,xmm */
> - host_and_vcpu_must_have(sha);
> - op_bytes = 16;
> - goto simd_0f38_common;
> -
> case X86EMUL_OPC_66(0x0f38, 0xdb): /* aesimc xmm/m128,xmm */
> case X86EMUL_OPC_VEX_66(0x0f38, 0xdb): /* vaesimc xmm/m128,xmm */
> case X86EMUL_OPC_66(0x0f38, 0xdc): /* aesenc xmm/m128,xmm,xmm */
> @@ -7341,9 +7327,21 @@ x86_emulate(
> host_and_vcpu_must_have(aesni);
> if ( vex.opcx == vex_none )
> goto simd_0f38_common;
> + /* fall through */
> + case X86EMUL_OPC_VEX_66(0x0f38, 0x41): /* vphminposuw xmm/m128,xmm,xmm */
> generate_exception_if(vex.l, EXC_UD);
> goto simd_0f_avx;
>
> + case X86EMUL_OPC(0x0f38, 0xc8): /* sha1nexte xmm/m128,xmm */
> + case X86EMUL_OPC(0x0f38, 0xc9): /* sha1msg1 xmm/m128,xmm */
> + case X86EMUL_OPC(0x0f38, 0xca): /* sha1msg2 xmm/m128,xmm */
> + case X86EMUL_OPC(0x0f38, 0xcb): /* sha256rnds2 XMM0,xmm/m128,xmm */
> + case X86EMUL_OPC(0x0f38, 0xcc): /* sha256msg1 xmm/m128,xmm */
> + case X86EMUL_OPC(0x0f38, 0xcd): /* sha256msg2 xmm/m128,xmm */
> + host_and_vcpu_must_have(sha);
> + op_bytes = 16;
> + goto simd_0f38_common;
> +
> case X86EMUL_OPC(0x0f38, 0xf0): /* movbe m,r */
> case X86EMUL_OPC(0x0f38, 0xf1): /* movbe r,m */
> vcpu_must_have(movbe);
> @@ -7519,6 +7517,19 @@ x86_emulate(
> generate_exception_if(vex.w, EXC_UD);
> goto simd_0f_imm8_avx;
>
> + case X86EMUL_OPC_66(0x0f3a, 0x08): /* roundps $imm8,xmm/m128,xmm */
> + case X86EMUL_OPC_66(0x0f3a, 0x09): /* roundpd $imm8,xmm/m128,xmm */
> + case X86EMUL_OPC_66(0x0f3a, 0x0a): /* roundss $imm8,xmm/m128,xmm */
> + case X86EMUL_OPC_66(0x0f3a, 0x0b): /* roundsd $imm8,xmm/m128,xmm */
> + case X86EMUL_OPC_66(0x0f3a, 0x0c): /* blendps $imm8,xmm/m128,xmm */
> + case X86EMUL_OPC_66(0x0f3a, 0x0d): /* blendpd $imm8,xmm/m128,xmm */
> + case X86EMUL_OPC_66(0x0f3a, 0x0e): /* pblendw $imm8,xmm/m128,xmm */
> + case X86EMUL_OPC_66(0x0f3a, 0x40): /* dpps $imm8,xmm/m128,xmm */
> + case X86EMUL_OPC_66(0x0f3a, 0x41): /* dppd $imm8,xmm/m128,xmm */
> + case X86EMUL_OPC_66(0x0f3a, 0x42): /* mpsadbw $imm8,xmm/m128,xmm */
> + host_and_vcpu_must_have(sse4_1);
> + goto simd_0f3a_common;
> +
> case X86EMUL_OPC(0x0f3a, 0x0f): /* palignr $imm8,mm/m64,mm */
> case X86EMUL_OPC_66(0x0f3a, 0x0f): /* palignr $imm8,xmm/m128,xmm */
> host_and_vcpu_must_have(ssse3);
> @@ -7547,19 +7558,6 @@ x86_emulate(
> fic.insn_bytes = PFX_BYTES + 4;
> break;
>
> - case X86EMUL_OPC_66(0x0f3a, 0x08): /* roundps $imm8,xmm/m128,xmm */
> - case X86EMUL_OPC_66(0x0f3a, 0x09): /* roundpd $imm8,xmm/m128,xmm */
> - case X86EMUL_OPC_66(0x0f3a, 0x0a): /* roundss $imm8,xmm/m128,xmm */
> - case X86EMUL_OPC_66(0x0f3a, 0x0b): /* roundsd $imm8,xmm/m128,xmm */
> - case X86EMUL_OPC_66(0x0f3a, 0x0c): /* blendps $imm8,xmm/m128,xmm */
> - case X86EMUL_OPC_66(0x0f3a, 0x0d): /* blendpd $imm8,xmm/m128,xmm */
> - case X86EMUL_OPC_66(0x0f3a, 0x0e): /* pblendw $imm8,xmm/m128,xmm */
> - case X86EMUL_OPC_66(0x0f3a, 0x40): /* dpps $imm8,xmm/m128,xmm */
> - case X86EMUL_OPC_66(0x0f3a, 0x41): /* dppd $imm8,xmm/m128,xmm */
> - case X86EMUL_OPC_66(0x0f3a, 0x42): /* mpsadbw $imm8,xmm/m128,xmm */
> - host_and_vcpu_must_have(sse4_1);
> - goto simd_0f3a_common;
> -
> case X86EMUL_OPC_66(0x0f3a, 0x14): /* pextrb $imm8,xmm,r/m */
> case X86EMUL_OPC_66(0x0f3a, 0x15): /* pextrw $imm8,xmm,r/m */
> case X86EMUL_OPC_66(0x0f3a, 0x16): /* pextr{d,q} $imm8,xmm,r/m */
>
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel