[xen master] x86/vmx: Remove HAVE_AS_{EPT,VMX}, GAS_VMX_OP(), etc
commit 1f2ea165f6b4be754ce02319f89df96a2a649d37
Author:     Denis Mukhin <dmukhin@xxxxxxxx>
AuthorDate: Thu Apr 3 18:23:04 2025 +0000
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Thu Apr 3 21:29:34 2025 +0100

    x86/vmx: Remove HAVE_AS_{EPT,VMX}, GAS_VMX_OP(), etc

    The new toolchain baseline knows the VMX instructions; no need to carry
    the workaround in the code.

    Inline __vmxoff() into its single caller.

    Updated formatting in the wrappers to be consistent.

    No functional change.

    Resolves: https://gitlab.com/xen-project/xen/-/work_items/202
    Signed-off-by: Denis Mukhin <dmukhin@xxxxxxxx>
    Reviewed-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
 xen/arch/x86/arch.mk                   |   4 +-
 xen/arch/x86/hvm/vmx/vmcs.c            |   2 +-
 xen/arch/x86/include/asm/hvm/vmx/vmx.h | 125 +++++----------------------------
 3 files changed, 19 insertions(+), 112 deletions(-)

diff --git a/xen/arch/x86/arch.mk b/xen/arch/x86/arch.mk
index 258e459bec..e9fa1c92d7 100644
--- a/xen/arch/x86/arch.mk
+++ b/xen/arch/x86/arch.mk
@@ -10,9 +10,7 @@ CFLAGS += -msoft-float
 
 $(call cc-options-add,CFLAGS,CC,$(EMBEDDED_EXTRA_CFLAGS))
 $(call cc-option-add,CFLAGS,CC,-Wnested-externs)
-$(call as-option-add,CFLAGS,CC,"vmcall",-DHAVE_AS_VMX)
 $(call as-option-add,CFLAGS,CC,"crc32 %eax$(comma)%eax",-DHAVE_AS_SSE4_2)
-$(call as-option-add,CFLAGS,CC,"invept (%rax)$(comma)%rax",-DHAVE_AS_EPT)
 $(call as-option-add,CFLAGS,CC,"rdrand %eax",-DHAVE_AS_RDRAND)
 $(call as-option-add,CFLAGS,CC,"rdfsbase %rax",-DHAVE_AS_FSGSBASE)
 $(call as-option-add,CFLAGS,CC,"xsaveopt (%rax)",-DHAVE_AS_XSAVEOPT)
@@ -23,7 +21,7 @@ $(call as-option-add,CFLAGS,CC,"invpcid (%rax)$(comma)%rax",-DHAVE_AS_INVPCID)
 $(call as-option-add,CFLAGS,CC,"movdiri %rax$(comma)(%rax)",-DHAVE_AS_MOVDIR)
 $(call as-option-add,CFLAGS,CC,"enqcmd (%rax)$(comma)%rax",-DHAVE_AS_ENQCMD)
 
-# Check to see whether the assmbler supports the .nop directive.
+# Check to see whether the assembler supports the .nop directive.
 $(call as-option-add,CFLAGS,CC,\
     ".L1: .L2: .nops (.L2 - .L1)$(comma)9",-DHAVE_AS_NOPS_DIRECTIVE)
 
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 1d427100ce..a44475ae15 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -811,7 +811,7 @@ void cf_check vmx_cpu_down(void)
     BUG_ON(!(read_cr4() & X86_CR4_VMXE));
     this_cpu(vmxon) = 0;
 
-    __vmxoff();
+    asm volatile ( "vmxoff" ::: "memory" );
 
     local_irq_restore(flags);
 }
diff --git a/xen/arch/x86/include/asm/hvm/vmx/vmx.h b/xen/arch/x86/include/asm/hvm/vmx/vmx.h
index 7c6ba73407..affb3a8bd6 100644
--- a/xen/arch/x86/include/asm/hvm/vmx/vmx.h
+++ b/xen/arch/x86/include/asm/hvm/vmx/vmx.h
@@ -257,24 +257,6 @@ typedef union cr_access_qual {
 #define X86_SEG_AR_GRANULARITY    (1u << 15) /* 15, granularity */
 #define X86_SEG_AR_SEG_UNUSABLE   (1u << 16) /* 16, segment unusable */
 
-#define VMCALL_OPCODE   ".byte 0x0f,0x01,0xc1\n"
-#define VMCLEAR_OPCODE  ".byte 0x66,0x0f,0xc7\n"   /* reg/opcode: /6 */
-#define VMLAUNCH_OPCODE ".byte 0x0f,0x01,0xc2\n"
-#define VMPTRLD_OPCODE  ".byte 0x0f,0xc7\n"        /* reg/opcode: /6 */
-#define VMPTRST_OPCODE  ".byte 0x0f,0xc7\n"        /* reg/opcode: /7 */
-#define VMREAD_OPCODE   ".byte 0x0f,0x78\n"
-#define VMRESUME_OPCODE ".byte 0x0f,0x01,0xc3\n"
-#define VMWRITE_OPCODE  ".byte 0x0f,0x79\n"
-#define INVEPT_OPCODE   ".byte 0x66,0x0f,0x38,0x80\n"   /* m128,r64/32 */
-#define INVVPID_OPCODE  ".byte 0x66,0x0f,0x38,0x81\n"   /* m128,r64/32 */
-#define VMXOFF_OPCODE   ".byte 0x0f,0x01,0xc4\n"
-#define VMXON_OPCODE    ".byte 0xf3,0x0f,0xc7\n"
-
-#define MODRM_EAX_08    ".byte 0x08\n" /* ECX, [EAX] */
-#define MODRM_EAX_06    ".byte 0x30\n" /* [EAX], with reg/opcode: /6 */
-#define MODRM_EAX_07    ".byte 0x38\n" /* [EAX], with reg/opcode: /7 */
-#define MODRM_EAX_ECX   ".byte 0xc1\n" /* EAX, ECX */
-
 extern uint8_t posted_intr_vector;
 
 #define cpu_has_vmx_ept_exec_only_supported        \
@@ -310,99 +292,54 @@ extern uint8_t posted_intr_vector;
 #define INVVPID_ALL_CONTEXT                     2
 #define INVVPID_SINGLE_CONTEXT_RETAINING_GLOBAL 3
 
-#ifdef HAVE_AS_VMX
-# define GAS_VMX_OP(yes, no) yes
-#else
-# define GAS_VMX_OP(yes, no) no
-#endif
-
 static always_inline void __vmptrld(u64 addr)
 {
-    asm volatile (
-#ifdef HAVE_AS_VMX
-                   "vmptrld %0\n"
-#else
-                   VMPTRLD_OPCODE MODRM_EAX_06
-#endif
+    asm volatile ( "vmptrld %0\n\t"
                    /* CF==1 or ZF==1 --> BUG() */
                    UNLIKELY_START(be, vmptrld)
                    _ASM_BUGFRAME_TEXT(0)
                    UNLIKELY_END_SECTION
                    :
-#ifdef HAVE_AS_VMX
                    : "m" (addr),
-#else
-                   : "a" (&addr),
-#endif
                      _ASM_BUGFRAME_INFO(BUGFRAME_bug, __LINE__, __FILE__, 0)
-                   : "memory");
+                   : "memory" );
 }
 
 static always_inline void __vmpclear(u64 addr)
 {
-    asm volatile (
-#ifdef HAVE_AS_VMX
-                   "vmclear %0\n"
-#else
-                   VMCLEAR_OPCODE MODRM_EAX_06
-#endif
+    asm volatile ( "vmclear %0\n\t"
                    /* CF==1 or ZF==1 --> BUG() */
                    UNLIKELY_START(be, vmclear)
                    _ASM_BUGFRAME_TEXT(0)
                    UNLIKELY_END_SECTION
                    :
-#ifdef HAVE_AS_VMX
                    : "m" (addr),
-#else
-                   : "a" (&addr),
-#endif
                      _ASM_BUGFRAME_INFO(BUGFRAME_bug, __LINE__, __FILE__, 0)
-                   : "memory");
+                   : "memory" );
 }
 
 static always_inline void __vmread(unsigned long field, unsigned long *value)
 {
-    asm volatile (
-#ifdef HAVE_AS_VMX
-                   "vmread %1, %0\n\t"
-#else
-                   VMREAD_OPCODE MODRM_EAX_ECX
-#endif
+    asm volatile ( "vmread %1, %0\n\t"
                    /* CF==1 or ZF==1 --> BUG() */
                    UNLIKELY_START(be, vmread)
                    _ASM_BUGFRAME_TEXT(0)
                    UNLIKELY_END_SECTION
-#ifdef HAVE_AS_VMX
                    : "=rm" (*value)
                    : "r" (field),
-#else
-                   : "=c" (*value)
-                   : "a" (field),
-#endif
-                     _ASM_BUGFRAME_INFO(BUGFRAME_bug, __LINE__, __FILE__, 0)
-        );
+                     _ASM_BUGFRAME_INFO(BUGFRAME_bug, __LINE__, __FILE__, 0) );
 }
 
 static always_inline void __vmwrite(unsigned long field, unsigned long value)
 {
-    asm volatile (
-#ifdef HAVE_AS_VMX
-                   "vmwrite %1, %0\n"
-#else
-                   VMWRITE_OPCODE MODRM_EAX_ECX
-#endif
+    asm volatile ( "vmwrite %1, %0\n"
                    /* CF==1 or ZF==1 --> BUG() */
                    UNLIKELY_START(be, vmwrite)
                    _ASM_BUGFRAME_TEXT(0)
                    UNLIKELY_END_SECTION
                    :
-#ifdef HAVE_AS_VMX
                    : "r" (field) , "rm" (value),
-#else
-                   : "a" (field) , "c" (value),
-#endif
-                     _ASM_BUGFRAME_INFO(BUGFRAME_bug, __LINE__, __FILE__, 0)
-        );
+                     _ASM_BUGFRAME_INFO(BUGFRAME_bug, __LINE__, __FILE__, 0) );
 }
 
 static inline enum vmx_insn_errno vmread_safe(unsigned long field,
@@ -411,14 +348,13 @@ static inline enum vmx_insn_errno vmread_safe(unsigned long field,
     unsigned long ret = VMX_INSN_SUCCEED;
     bool fail_invalid, fail_valid;
 
-    asm volatile ( GAS_VMX_OP("vmread %[field], %[value]\n\t",
-                              VMREAD_OPCODE MODRM_EAX_ECX)
+    asm volatile ( "vmread %[field], %[value]\n\t"
                    ASM_FLAG_OUT(, "setc %[invalid]\n\t")
                    ASM_FLAG_OUT(, "setz %[valid]\n\t")
                    : ASM_FLAG_OUT("=@ccc", [invalid] "=rm") (fail_invalid),
                      ASM_FLAG_OUT("=@ccz", [valid] "=rm") (fail_valid),
-                     [value] GAS_VMX_OP("=rm", "=c") (*value)
-                   : [field] GAS_VMX_OP("r", "a") (field));
+                     [value] "=rm" (*value)
+                   : [field] "r" (field) );
 
     if ( unlikely(fail_invalid) )
         ret = VMX_INSN_FAIL_INVALID;
@@ -434,14 +370,13 @@ static inline enum vmx_insn_errno vmwrite_safe(unsigned long field,
     unsigned long ret = VMX_INSN_SUCCEED;
     bool fail_invalid, fail_valid;
 
-    asm volatile ( GAS_VMX_OP("vmwrite %[value], %[field]\n\t",
-                              VMWRITE_OPCODE MODRM_EAX_ECX)
+    asm volatile ( "vmwrite %[value], %[field]\n\t"
                    ASM_FLAG_OUT(, "setc %[invalid]\n\t")
                    ASM_FLAG_OUT(, "setz %[valid]\n\t")
                    : ASM_FLAG_OUT("=@ccc", [invalid] "=rm") (fail_invalid),
                      ASM_FLAG_OUT("=@ccz", [valid] "=rm") (fail_valid)
-                   : [field] GAS_VMX_OP("r", "a") (field),
-                     [value] GAS_VMX_OP("rm", "c") (value));
+                   : [field] "r" (field),
+                     [value] "rm" (value) );
 
     if ( unlikely(fail_invalid) )
         ret = VMX_INSN_FAIL_INVALID;
@@ -465,22 +400,13 @@ static always_inline void __invept(unsigned long type, uint64_t eptp)
          !cpu_has_vmx_ept_invept_single_context )
         type = INVEPT_ALL_CONTEXT;
 
-    asm volatile (
-#ifdef HAVE_AS_EPT
-                   "invept %0, %1\n"
-#else
-                   INVEPT_OPCODE MODRM_EAX_08
-#endif
+    asm volatile ( "invept %0, %1\n\t"
                    /* CF==1 or ZF==1 --> BUG() */
                    UNLIKELY_START(be, invept)
                    _ASM_BUGFRAME_TEXT(0)
                    UNLIKELY_END_SECTION
                    :
-#ifdef HAVE_AS_EPT
                    : "m" (operand), "r" (type),
-#else
-                   : "a" (&operand), "c" (type),
-#endif
                      _ASM_BUGFRAME_INFO(BUGFRAME_bug, __LINE__, __FILE__, 0)
                    : "memory" );
 }
@@ -494,24 +420,14 @@ static always_inline void __invvpid(unsigned long type, u16 vpid, u64 gva)
     } operand = {vpid, 0, gva};
 
     /* Fix up #UD exceptions which occur when TLBs are flushed before VMXON. */
-    asm volatile ( "1: "
-#ifdef HAVE_AS_EPT
-                   "invvpid %0, %1\n"
-#else
-                   INVVPID_OPCODE MODRM_EAX_08
-#endif
+    asm volatile ( "1: invvpid %0, %1\n\t"
                    /* CF==1 or ZF==1 --> BUG() */
                    UNLIKELY_START(be, invvpid)
                    _ASM_BUGFRAME_TEXT(0)
                    UNLIKELY_END_SECTION "\n"
-                   "2:"
-                   _ASM_EXTABLE(1b, 2b)
+                   "2:" _ASM_EXTABLE(1b, 2b)
                    :
-#ifdef HAVE_AS_EPT
                    : "m" (operand), "r" (type),
-#else
-                   : "a" (&operand), "c" (type),
-#endif
                      _ASM_BUGFRAME_INFO(BUGFRAME_bug, __LINE__, __FILE__, 0)
                    : "memory" );
 }
@@ -552,13 +468,6 @@ static inline void vpid_sync_all(void)
 {
     __invvpid(INVVPID_ALL_CONTEXT, 0, 0);
 }
 
-static inline void __vmxoff(void)
-{
-    asm volatile (
-        VMXOFF_OPCODE
-        : : : "memory" );
-}
-
 int cf_check vmx_guest_x86_mode(struct vcpu *v);
 unsigned int vmx_get_cpl(void);
--
generated by git-patchbot for /home/xen/git/xen.git#master