[Xen-devel] [PATCH v4 06/17] x86emul: support {,V}{LD,ST}MXCSR
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v4: Drop the host_and_ part from the AVX checks.
v3: Re-base.

--- a/tools/fuzz/x86_instruction_emulator/x86-insn-emulator-fuzzer.c
+++ b/tools/fuzz/x86_instruction_emulator/x86-insn-emulator-fuzzer.c
@@ -660,7 +660,7 @@ int LLVMFuzzerTestOneInput(const uint8_t
     };
     int rc;
 
-    stack_exec = emul_test_make_stack_executable();
+    stack_exec = emul_test_init();
     if ( !stack_exec )
     {
         printf("Warning: Stack could not be made executable (%d).\n", errno);
--- a/tools/tests/x86_emulator/test_x86_emulator.c
+++ b/tools/tests/x86_emulator/test_x86_emulator.c
@@ -219,7 +219,7 @@ int main(int argc, char **argv)
     }
     instr = (char *)res + 0x100;
 
-    stack_exec = emul_test_make_stack_executable();
+    stack_exec = emul_test_init();
     if ( !stack_exec )
         printf("Warning: Stack could not be made executable (%d).\n", errno);
@@ -2395,6 +2395,87 @@ int main(int argc, char **argv)
             goto fail;
         printf("okay\n");
     }
+    else
+        printf("skipped\n");
+
+    printf("%-40s", "Testing stmxcsr (%edx)...");
+    if ( cpu_has_sse )
+    {
+        decl_insn(stmxcsr);
+
+        asm volatile ( put_insn(stmxcsr, "stmxcsr (%0)") :: "d" (NULL) );
+
+        res[0] = 0x12345678;
+        res[1] = 0x87654321;
+        asm ( "stmxcsr %0" : "=m" (res[2]) );
+        set_insn(stmxcsr);
+        regs.edx = (unsigned long)res;
+        rc = x86_emulate(&ctxt, &emulops);
+        if ( rc != X86EMUL_OKAY || !check_eip(stmxcsr) ||
+             res[0] != res[2] || res[1] != 0x87654321 )
+            goto fail;
+        printf("okay\n");
+    }
+    else
+        printf("skipped\n");
+
+    printf("%-40s", "Testing ldmxcsr 4(%ecx)...");
+    if ( cpu_has_sse )
+    {
+        decl_insn(ldmxcsr);
+
+        asm volatile ( put_insn(ldmxcsr, "ldmxcsr 4(%0)") :: "c" (NULL) );
+
+        set_insn(ldmxcsr);
+        res[1] = mxcsr_mask;
+        regs.ecx = (unsigned long)res;
+        rc = x86_emulate(&ctxt, &emulops);
+        asm ( "stmxcsr %0; ldmxcsr %1" : "=m" (res[0]) : "m" (res[2]) );
+        if ( rc != X86EMUL_OKAY || !check_eip(ldmxcsr) ||
+             res[0] != mxcsr_mask )
+            goto fail;
+        printf("okay\n");
+    }
+    else
+        printf("skipped\n");
+
+    printf("%-40s", "Testing vstmxcsr (%ecx)...");
+    if ( cpu_has_avx )
+    {
+        decl_insn(vstmxcsr);
+
+        asm volatile ( put_insn(vstmxcsr, "vstmxcsr (%0)") :: "c" (NULL) );
+
+        res[0] = 0x12345678;
+        res[1] = 0x87654321;
+        set_insn(vstmxcsr);
+        regs.ecx = (unsigned long)res;
+        rc = x86_emulate(&ctxt, &emulops);
+        if ( rc != X86EMUL_OKAY || !check_eip(vstmxcsr) ||
+             res[0] != res[2] || res[1] != 0x87654321 )
+            goto fail;
+        printf("okay\n");
+    }
+    else
+        printf("skipped\n");
+
+    printf("%-40s", "Testing vldmxcsr 4(%edx)...");
+    if ( cpu_has_avx )
+    {
+        decl_insn(vldmxcsr);
+
+        asm volatile ( put_insn(vldmxcsr, "vldmxcsr 4(%0)") :: "d" (NULL) );
+
+        set_insn(vldmxcsr);
+        res[1] = mxcsr_mask;
+        regs.edx = (unsigned long)res;
+        rc = x86_emulate(&ctxt, &emulops);
+        asm ( "stmxcsr %0; ldmxcsr %1" : "=m" (res[0]) : "m" (res[2]) );
+        if ( rc != X86EMUL_OKAY || !check_eip(vldmxcsr) ||
+             res[0] != mxcsr_mask )
+            goto fail;
+        printf("okay\n");
+    }
     else
         printf("skipped\n");
--- a/tools/tests/x86_emulator/x86_emulate.c
+++ b/tools/tests/x86_emulator/x86_emulate.c
@@ -22,10 +22,29 @@
 #define get_stub(stb) ((void *)((stb).addr = (uintptr_t)(stb).buf))
 #define put_stub(stb)
 
-bool emul_test_make_stack_executable(void)
+uint32_t mxcsr_mask = 0x0000ffbf;
+
+bool emul_test_init(void)
 {
     unsigned long sp;
 
+    if ( cpu_has_fxsr )
+    {
+        static union __attribute__((__aligned__(16))) {
+            char x[464];
+            struct {
+                uint32_t other[6];
+                uint32_t mxcsr;
+                uint32_t mxcsr_mask;
+                /* ... */
+            };
+        } fxs;
+
+        asm ( "fxsave %0" : "=m" (fxs) );
+        if ( fxs.mxcsr_mask )
+            mxcsr_mask = fxs.mxcsr_mask;
+    }
+
     /*
      * Mark the entire stack executable so that the stub executions
      * don't fault
--- a/tools/tests/x86_emulator/x86_emulate.h
+++ b/tools/tests/x86_emulator/x86_emulate.h
@@ -42,8 +42,10 @@
 #define is_canonical_address(x) (((int64_t)(x) >> 47) == ((int64_t)(x) >> 63))
 
+extern uint32_t mxcsr_mask;
+
 #define MMAP_SZ 16384
-bool emul_test_make_stack_executable(void);
+bool emul_test_init(void);
 
 #include "x86_emulate/x86_emulate.h"
 
@@ -68,6 +70,12 @@ static inline uint64_t xgetbv(uint32_t x
     (res.d & (1U << 23)) != 0; \
 })
 
+#define cpu_has_fxsr ({ \
+    struct cpuid_leaf res; \
+    emul_test_cpuid(1, 0, &res, NULL); \
+    (res.d & (1U << 24)) != 0; \
+})
+
 #define cpu_has_sse ({ \
     struct cpuid_leaf res; \
     emul_test_cpuid(1, 0, &res, NULL); \
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -2173,7 +2173,6 @@ x86_decode_twobyte(
     case 0x50 ... 0x77:
     case 0x79 ... 0x7d:
     case 0x7f:
-    case 0xae:
     case 0xc2 ... 0xc3:
     case 0xc5 ... 0xc6:
     case 0xd0 ... 0xfe:
@@ -2204,6 +2203,24 @@ x86_decode_twobyte(
         }
         break;
 
+    case 0xae:
+        ctxt->opcode |= MASK_INSR(vex.pfx, X86EMUL_OPC_PFX_MASK);
+        /* fall through */
+    case X86EMUL_OPC_VEX(0, 0xae):
+        switch ( modrm_reg & 7 )
+        {
+        case 2: /* {,v}ldmxcsr */
+            state->desc = DstImplicit | SrcMem | ModRM | Mov;
+            op_bytes = 4;
+            break;
+
+        case 3: /* {,v}stmxcsr */
+            state->desc = DstMem | SrcImplicit | ModRM | Mov;
+            op_bytes = 4;
+            break;
+        }
+        break;
+
     case 0xb8: /* jmpe / popcnt */
         if ( rep_prefix() )
             ctxt->opcode |= MASK_INSR(vex.pfx, X86EMUL_OPC_PFX_MASK);
@@ -6191,6 +6208,23 @@ x86_emulate(
     case X86EMUL_OPC(0x0f, 0xae): case X86EMUL_OPC_66(0x0f, 0xae): /* Grp15 */
         switch ( modrm_reg & 7 )
         {
+        case 2: /* ldmxcsr */
+            generate_exception_if(vex.pfx, EXC_UD);
+            vcpu_must_have(sse);
+        ldmxcsr:
+            generate_exception_if(src.type != OP_MEM, EXC_UD);
+            generate_exception_if(src.val & ~mxcsr_mask, EXC_GP, 0);
+            asm volatile ( "ldmxcsr %0" :: "m" (src.val) );
+            break;
+
+        case 3: /* stmxcsr */
+            generate_exception_if(vex.pfx, EXC_UD);
+            vcpu_must_have(sse);
+        stmxcsr:
+            generate_exception_if(dst.type != OP_MEM, EXC_UD);
+            asm volatile ( "stmxcsr %0" : "=m" (dst.val) );
+            break;
+
         case 5: /* lfence */
             fail_if(modrm_mod != 3);
             generate_exception_if(vex.pfx, EXC_UD);
@@ -6234,6 +6268,20 @@ x86_emulate(
         }
         break;
 
+    case X86EMUL_OPC_VEX(0x0f, 0xae): /* Grp15 */
+        switch ( modrm_reg & 7 )
+        {
+        case 2: /* vldmxcsr */
+            generate_exception_if(vex.l || vex.reg != 0xf, EXC_UD);
+            vcpu_must_have(avx);
+            goto ldmxcsr;
+        case 3: /* vstmxcsr */
+            generate_exception_if(vex.l || vex.reg != 0xf, EXC_UD);
+            vcpu_must_have(avx);
+            goto stmxcsr;
+        }
+        goto cannot_emulate;
+
     case X86EMUL_OPC_F3(0x0f, 0xae): /* Grp15 */
         fail_if(modrm_mod != 3);
         generate_exception_if((modrm_reg & 4) || !mode_64bit(), EXC_UD);
--- a/xen/arch/x86/xstate.c
+++ b/xen/arch/x86/xstate.c
@@ -29,7 +29,7 @@ unsigned int *__read_mostly xstate_sizes
 u64 __read_mostly xstate_align;
 static unsigned int __read_mostly xstate_features;
 
-static uint32_t __read_mostly mxcsr_mask = 0x0000ffbf;
+uint32_t __read_mostly mxcsr_mask = 0x0000ffbf;
 
 /* Cached xcr0 for fast read */
 static DEFINE_PER_CPU(uint64_t, xcr0);
--- a/xen/include/asm-x86/xstate.h
+++ b/xen/include/asm-x86/xstate.h
@@ -15,6 +15,8 @@
 #define FCW_RESET                 0x0040
 #define MXCSR_DEFAULT             0x1f80
 
+extern uint32_t mxcsr_mask;
+
 #define XSTATE_CPUID              0x0000000d
 
 #define XCR_XFEATURE_ENABLED_MASK 0x00000000  /* index of XCR0 */
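
For reference, below is a minimal standalone sketch (not part of the patch) of the MXCSR mask handling the changes above rely on: FXSAVE stores MXCSR at byte offset 24 and MXCSR_MASK at offset 28 of its 512-byte area, a stored mask of zero means the default 0x0000ffbf applies, and loading an MXCSR value with bits set outside the mask raises #GP, which is what the emulator's generate_exception_if(src.val & ~mxcsr_mask, EXC_GP, 0) check mirrors. The probe_mxcsr_mask() helper name and the build command are made up for illustration, assuming an SSE/FXSR-capable x86 host.

/* Build e.g. with: cc -O2 probe.c (assumption: x86 host with SSE/FXSR). */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Hypothetical helper: derive the supported-MXCSR-bits mask the same way
 * the test harness' emul_test_init() does, from the MXCSR_MASK field of
 * an FXSAVE image.
 */
static uint32_t probe_mxcsr_mask(void)
{
    /* FXSAVE requires a 16-byte aligned, 512-byte save area. */
    static uint8_t fxarea[512] __attribute__((__aligned__(16)));
    uint32_t mask;

    __asm__ volatile ( "fxsave %0" : "=m" (fxarea) );
    memcpy(&mask, fxarea + 28, sizeof(mask));   /* MXCSR_MASK at offset 28 */

    return mask ? mask : 0x0000ffbf;            /* 0 => default mask */
}

int main(void)
{
    uint32_t mask = probe_mxcsr_mask();
    uint32_t val = 0x1f80;                      /* MXCSR_DEFAULT */

    printf("MXCSR_MASK: %#x\n", mask);

    /*
     * Mirror of the emulator's check: a value with reserved bits set would
     * make LDMXCSR raise #GP, so refuse to execute it in that case.
     */
    if ( val & ~mask )
        puts("value has reserved bits set; LDMXCSR would raise #GP");
    else
        __asm__ volatile ( "ldmxcsr %0" :: "m" (val) );

    return 0;
}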
Attachment: x86emul-SSE-AVX-0f-mxcsr.patch

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
https://lists.xen.org/xen-devel