[Xen-devel] [PATCH v8 42/50] x86emul: support AVX512_4VNNIW insns
As in a few cases before, since the insns here and in particular their
memory access patterns follow the AVX512_4FMAPS scheme, I didn't think
it was necessary to add contrived tests specifically for them, beyond
the Disp8 scaling ones.
Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>
---
v8: Correct vcpu_has_*() insertion point.
v7: Re-base.
v6: New.
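
Purely as background, and not part of the patch: below is a rough, unmasked C
model of what vp4dpwssd computes, to illustrate the 4-register / single-m128
operand pattern these insns share with AVX512_4FMAPS. All names and the layout
are mine, for illustration only; the vp4dpwssds form differs only in saturating
the dword accumulation, which the sketch deliberately omits.

#include <stdint.h>
#include <stdio.h>

#define NELEM 16 /* dword elements in a ZMM register */

/*
 * Illustrative model only: each of the 4 consecutive source registers
 * contributes one signed word pair per destination dword, multiplied
 * against the corresponding dword of the single 16-byte memory operand.
 */
static void p4dpwssd_model(int32_t dst[NELEM],
                           int16_t src_regs[4][2 * NELEM],
                           const int32_t mem128[4])
{
    for ( unsigned int j = 0; j < NELEM; ++j )
        for ( unsigned int m = 0; m < 4; ++m )
        {
            int16_t w0 = (int16_t)(mem128[m] & 0xffff);
            int16_t w1 = (int16_t)((uint32_t)mem128[m] >> 16);

            dst[j] += src_regs[m][2 * j] * w0 + src_regs[m][2 * j + 1] * w1;
        }
}

int main(void)
{
    int32_t dst[NELEM] = { 0 };
    int16_t regs[4][2 * NELEM] = { { 1, 2 }, { 3, 4 } };
    int32_t mem[4] = { 0x00020001, 0x00040003 };

    p4dpwssd_model(dst, regs, mem);
    printf("dst[0] = %d\n", dst[0]); /* 1*1 + 2*2 + 3*3 + 4*4 = 30 */

    return 0;
}
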
--- a/tools/tests/x86_emulator/evex-disp8.c
+++ b/tools/tests/x86_emulator/evex-disp8.c
@@ -545,6 +545,11 @@ static const struct test avx512_4fmaps_5
INSN(4fnmaddss, f2, 0f38, ab, el_4, d, vl),
};
+static const struct test avx512_4vnniw_512[] = {
+ INSN(p4dpwssd, f2, 0f38, 52, el_4, d, vl),
+ INSN(p4dpwssds, f2, 0f38, 53, el_4, d, vl),
+};
+
static const struct test avx512_bitalg_all[] = {
INSN(popcnt, 66, 0f38, 54, vl, bw, vl),
INSN(pshufbitqmb, 66, 0f38, 8f, vl, b, vl),
@@ -949,6 +954,7 @@ void evex_disp8_test(void *instr, struct
#define cpu_has_avx512pf cpu_has_avx512f
RUN(avx512pf, 512);
RUN(avx512_4fmaps, 512);
+ RUN(avx512_4vnniw, 512);
RUN(avx512_bitalg, all);
RUN(avx512_ifma, all);
RUN(avx512_vbmi, all);
--- a/tools/tests/x86_emulator/x86-emulate.h
+++ b/tools/tests/x86_emulator/x86-emulate.h
@@ -146,6 +146,7 @@ static inline bool xcr0_mask(uint64_t ma
#define cpu_has_avx512_vbmi2 (cp.feat.avx512_vbmi2 && xcr0_mask(0xe6))
#define cpu_has_avx512_bitalg (cp.feat.avx512_bitalg && xcr0_mask(0xe6))
#define cpu_has_avx512_vpopcntdq (cp.feat.avx512_vpopcntdq && xcr0_mask(0xe6))
+#define cpu_has_avx512_4vnniw (cp.feat.avx512_4vnniw && xcr0_mask(0xe6))
#define cpu_has_avx512_4fmaps (cp.feat.avx512_4fmaps && xcr0_mask(0xe6))
#define cpu_has_xgetbv1 (cpu_has_xsave && cp.xstate.xgetbv1)
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -479,6 +479,7 @@ static const struct ext0f38_table {
[0x4d] = { .simd_size = simd_scalar_vexw, .d8s = d8s_dq },
[0x4e] = { .simd_size = simd_packed_fp, .two_op = 1, .d8s = d8s_vl },
[0x4f] = { .simd_size = simd_scalar_vexw, .d8s = d8s_dq },
+ [0x52 ... 0x53] = { .simd_size = simd_128, .d8s = 4 },
[0x54 ... 0x55] = { .simd_size = simd_packed_int, .two_op = 1, .d8s = d8s_vl },
[0x58] = { .simd_size = simd_other, .two_op = 1, .d8s = 2 },
[0x59] = { .simd_size = simd_other, .two_op = 1, .d8s = 3 },
@@ -1924,6 +1925,7 @@ static bool vcpu_has(
#define vcpu_has_avx512_bitalg() vcpu_has( 7, ECX, 12, ctxt, ops)
#define vcpu_has_avx512_vpopcntdq() vcpu_has( 7, ECX, 14, ctxt, ops)
#define vcpu_has_rdpid() vcpu_has( 7, ECX, 22, ctxt, ops)
+#define vcpu_has_avx512_4vnniw() vcpu_has( 7, EDX, 2, ctxt, ops)
#define vcpu_has_avx512_4fmaps() vcpu_has( 7, EDX, 3, ctxt, ops)
#define vcpu_has_clzero() vcpu_has(0x80000008, EBX, 0, ctxt, ops)
@@ -8944,6 +8946,15 @@ x86_emulate(
generate_exception_if(vex.l, EXC_UD);
goto simd_0f_avx;
+ case X86EMUL_OPC_EVEX_F2(0x0f38, 0x52): /* vp4dpwssd m128,zmm+3,zmm{k} */
+ case X86EMUL_OPC_EVEX_F2(0x0f38, 0x53): /* vp4dpwssds m128,zmm+3,zmm{k} */
+ host_and_vcpu_must_have(avx512_4vnniw);
+ generate_exception_if((ea.type != OP_MEM || evex.w || evex.brs ||
+ evex.lr != 2),
+ EXC_UD);
+ op_mask = op_mask & 0xffff ? 0xf : 0;
+ goto simd_zmm;
+
case X86EMUL_OPC_EVEX_66(0x0f38, 0x8f): /* vpshufbitqmb [xyz]mm/mem,[xyz]mm,k{k} */
generate_exception_if(evex.w || !evex.r || !evex.R || evex.z, EXC_UD);
/* fall through */
--- a/xen/include/asm-x86/cpufeature.h
+++ b/xen/include/asm-x86/cpufeature.h
@@ -117,6 +117,7 @@
#define cpu_has_rdpid boot_cpu_has(X86_FEATURE_RDPID)
/* CPUID level 0x00000007:0.edx */
+#define cpu_has_avx512_4vnniw boot_cpu_has(X86_FEATURE_AVX512_4VNNIW)
#define cpu_has_avx512_4fmaps boot_cpu_has(X86_FEATURE_AVX512_4FMAPS)
/* CPUID level 0x80000007.edx */
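
Two details that may help review, stated as my own reading rather than as
commentary from the patch: the .d8s = 4 table value means a compressed Disp8
is scaled by 2^4 = 16 bytes, matching the fixed m128 operand size, and the
op_mask adjustment collapses the 16-bit destination mask to either "all four
dwords of the memory operand" (0xf) or "no access" (0), since the emulator
treats the 16-byte load as all-or-nothing rather than per-element. A minimal
stand-alone sketch of both, with made-up example values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Hypothetical values, not taken from the patch. */
    int8_t disp8 = -2;         /* compressed displacement byte from the insn */
    unsigned int d8s = 4;      /* table value added above: scale = 1 << 4 */
    uint32_t op_mask = 0x0930; /* some destination dword elements active */

    /* EVEX Disp8*N: the byte displacement is scaled by the operand size. */
    int32_t eff_disp = (int32_t)disp8 * (1 << d8s); /* -2 * 16 = -32 bytes */

    /* Collapse the per-dword mask to an all-or-nothing m128 access mask. */
    uint32_t mem_mask = op_mask & 0xffff ? 0xf : 0;

    printf("effective displacement: %d, memory mask: %#x\n",
           eff_disp, mem_mask);

    return 0;
}
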