[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

Re: [Xen-devel] [PATCH 3/4] VMX: use proper instruction mnemonics if assembler supports them



On 23/08/2013 15:03, Jan Beulich wrote:
With the hex byte emission we were taking away a good part of
flexibility from the compiler, as for simplicity reasons these were
built using fixed operands. All half way modern build environments
would allow using the mnemonics (but we can't disable the hex variants
yet, since the binutils around at the time gcc 4.1 got released didn't
support these yet).

I didn't convert __vmread() yet because that would, just like for
__vmread_safe(), imply converting to a macro so that the output operand
can be the caller supplied variable rather than an intermediate one. As
that would require touching all invocation points of __vmread() (of
which there are quite a few), I'd first like to be certain the approach
is acceptable; the main question being whether the now conditional code
might be considered to cause future maintenance issues, and the second
being that of parameter/argument ordering (here I made __vmread_safe()
match __vmwrite(), but one could also take the position that read and
write should use the inverse order of one another, in line with the
actual instruction operands).

Additionally I was quite puzzled to find that all the asm()-s involved
here have memory clobbers - what are they needed for? Or can they be
dropped at least in some cases?

Signed-off-by: Jan Beulich <jbeulich@xxxxxxxx>

I certainly like and encourage the idea of getting rid of the fixed registers due to the hex literals.  However, are macros really the correct solution?

__vmread_safe(field, value) now has non-function-like semantics for value.

To me, the more obvious (and as far as I can tell, functionally identical) approach would be for

unsigned long always_inline __vmread_safe(unsigned long field, unsigned long *value)

The compiler, given the correct constraints for value could resolve the pointer back to the caller as it inlined the function, and calls would look like "__vmread_safe(field, &value);" which looks rather more natural.

As for the memory clobbers, I see no indication from the VMX instruction reference that vmread or inv{ept,vpid} have relevant side effects requiring a clobber.  For the load functions and vmwrite, there is a possibility of a memory clobber if someone has peeked into the relevant VMCS page(s).  However, doing so defeats the purpose of vmread/write, so as far as I can tell, we could argue away the clobbers on those grounds alone.

~Andrew


--- a/Config.mk
+++ b/Config.mk
@@ -4,6 +4,12 @@ ifeq ($(filter /%,$(XEN_ROOT)),)
 $(error XEN_ROOT must be absolute)
 endif
 
+# Convenient variables
+comma   := ,
+squote  := '
+empty   :=
+space   := $(empty) $(empty)
+
 # fallback for older make
 realpath = $(wildcard $(foreach file,$(1),$(shell cd -P $(dir $(file)) && echo "$$PWD/$(notdir $(file))")))
 
@@ -128,6 +134,21 @@ endef
 check-$(gcc) = $(call cc-ver-check,CC,0x040100,"Xen requires at least gcc-4.1")
 $(eval $(check-y))
 
+# as-insn: Check whether assembler supports an instruction.
+# Usage: cflags-y += $(call as-insn,CC,"insn",option-yes,option-no)
+as-insn = $(if $(shell echo 'void _(void) { asm volatile ( $(2) ); }' \
+                       | $(1) -c -x c -o /dev/null - 2>&1),$(4),$(3))
+
+# as-insn-check: Add an option to compilation flags, but only if insn is
+#                supported by assembler.
+# Usage: $(call as-insn-check,CFLAGS,CC,"nop",-DHAVE_GAS_NOP)
+as-insn-check = $(eval $(call as-insn-check-closure,$(1),$(2),$(3),$(4)))
+define as-insn-check-closure
+    ifeq ($$(call as-insn,$$($(2)),$(3),y,n),y)
+        $(1) += $(4)
+    endif
+endef
+
 define buildmakevars2shellvars
     export PREFIX="$(PREFIX)";                                            \
     export XEN_SCRIPT_DIR="$(XEN_SCRIPT_DIR)";                            \
--- a/xen/arch/x86/Rules.mk
+++ b/xen/arch/x86/Rules.mk
@@ -28,6 +28,8 @@ CFLAGS += -msoft-float
 
 $(call cc-options-add,CFLAGS,CC,$(EMBEDDED_EXTRA_CFLAGS))
 $(call cc-option-add,CFLAGS,CC,-Wnested-externs)
+$(call as-insn-check,CFLAGS,CC,"vmcall",-DHAVE_GAS_VMX)
+$(call as-insn-check,CFLAGS,CC,"invept (%rax)$$(comma)%rax",-DHAVE_GAS_EPT)
 
 ifeq ($(supervisor_mode_kernel),y)
 CFLAGS += -DCONFIG_X86_SUPERVISOR_MODE_KERNEL=1
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1292,12 +1292,11 @@ void vmx_do_resume(struct vcpu *v)
     reset_stack_and_jump(vmx_asm_do_vmentry);
 }
 
-static unsigned long vmr(unsigned long field)
+static inline unsigned long vmr(unsigned long field)
 {
-    int rc;
     unsigned long val;
-    val = __vmread_safe(field, &rc);
-    return rc ? 0 : val;
+
+    return __vmread_safe(field, val) ? val : 0;
 }
 
 static void vmx_dump_sel(char *name, uint32_t selector)
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -951,13 +951,11 @@ fallback:
         vvmcs_to_shadow(vvmcs, field[i]);
 }
 
-static void shadow_to_vvmcs(void *vvmcs, unsigned int field)
+static inline void shadow_to_vvmcs(void *vvmcs, unsigned int field)
 {
     u64 value;
-    int rc;
 
-    value = __vmread_safe(field, &rc);
-    if ( !rc )
+    if ( __vmread_safe(field, value) )
         __set_vvmcs(vvmcs, field, value);
 }
 
--- a/xen/include/asm-x86/hvm/vmx/vmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h
@@ -282,27 +282,44 @@ extern uint8_t posted_intr_vector;
 
 static inline void __vmptrld(u64 addr)
 {
-    asm volatile ( VMPTRLD_OPCODE
-                   MODRM_EAX_06
+    asm volatile (
+#ifdef HAVE_GAS_VMX
+                   "vmptrld %0\n"
+#else
+                   VMPTRLD_OPCODE MODRM_EAX_06
+#endif
                    /* CF==1 or ZF==1 --> crash (ud2) */
                    UNLIKELY_START(be, vmptrld)
                    "\tud2\n"
                    UNLIKELY_END_SECTION
                    :
+#ifdef HAVE_GAS_VMX
+                   : "m" (addr)
+#else
                    : "a" (&addr)
+#endif
                    : "memory");
 }
 
 static inline void __vmpclear(u64 addr)
 {
-    asm volatile ( VMCLEAR_OPCODE
-                   MODRM_EAX_06
+    asm volatile (
+#ifdef HAVE_GAS_VMX
+                   "vmclear %0\n"
+#else
+                   VMCLEAR_OPCODE MODRM_EAX_06
+#endif
                    /* CF==1 or ZF==1 --> crash (ud2) */
                    UNLIKELY_START(be, vmclear)
                    "\tud2\n"
                    UNLIKELY_END_SECTION
+#ifdef HAVE_GAS_VMX
+                   : "+m" (addr)
+                   :
+#else
                    :
                    : "a" (&addr)
+#endif
                    : "memory");
 }
 
@@ -325,31 +342,58 @@ static inline unsigned long __vmread(uns
 
 static inline void __vmwrite(unsigned long field, unsigned long value)
 {
-    asm volatile ( VMWRITE_OPCODE
-                   MODRM_EAX_ECX
+    asm volatile (
+#ifdef HAVE_GAS_VMX
+                   "vmwrite %1, %0\n"
+#else
+                   VMWRITE_OPCODE MODRM_EAX_ECX
+#endif
                    /* CF==1 or ZF==1 --> crash (ud2) */
                    UNLIKELY_START(be, vmwrite)
                    "\tud2\n"
                    UNLIKELY_END_SECTION
                    : 
+#ifdef HAVE_GAS_VMX
+                   : "r" (field) , "rm" (value)
+#else
                    : "a" (field) , "c" (value)
+#endif
                    : "memory");
 }
 
-static inline unsigned long __vmread_safe(unsigned long field, int *error)
-{
-    unsigned long ecx;
-
-    asm volatile ( VMREAD_OPCODE
-                   MODRM_EAX_ECX
-                   /* CF==1 or ZF==1 --> rc = -1 */
-                   "setna %b0 ; neg %0"
-                   : "=q" (*error), "=c" (ecx)
-                   : "0" (0), "a" (field)
-                   : "memory");
-
-    return ecx;
-}
+#ifdef HAVE_GAS_VMX
+#define __vmread_safe(field, value) ({ \
+    bool_t okay_; \
+    if ( sizeof(value) == sizeof(long) ) \
+        asm volatile ( "vmread %q2, %1\n\t" \
+                       /* CF==1 or ZF==1 --> rc = 0 */ \
+                       "setnbe %0" \
+                       : "=qm" (okay_), "=rm" (value) \
+                       : "r" (field) \
+                       : "memory"); \
+    else \
+    { \
+        asm volatile ( "vmread %q2, %q1\n\t" \
+                       /* CF==1 or ZF==1 --> rc = 0 */ \
+                       "setnbe %0" \
+                       : "=qm" (okay_), "=r" (value) \
+                       : "r" (field) \
+                       : "memory"); \
+    } \
+    okay_; \
+})
+#else
+#define __vmread_safe(field, value) ({ \
+    bool_t okay_; \
+    asm volatile ( VMREAD_OPCODE MODRM_EAX_ECX \
+                   /* CF==1 or ZF==1 --> rc = 0 */ \
+                   "setnbe %0" \
+                   : "=qm" (okay_), "=c" (value) \
+                   : "a" (field) \
+                   : "memory"); \
+    okay_; \
+})
+#endif
 
 static inline void __invept(int type, u64 eptp, u64 gpa)
 {
@@ -365,14 +409,22 @@ static inline void __invept(int type, u6
          !cpu_has_vmx_ept_invept_single_context )
         type = INVEPT_ALL_CONTEXT;
 
-    asm volatile ( INVEPT_OPCODE
-                   MODRM_EAX_08
+    asm volatile (
+#ifdef HAVE_GAS_EPT
+                   "invept %0, %q1\n"
+#else
+                   INVEPT_OPCODE MODRM_EAX_08
+#endif
                    /* CF==1 or ZF==1 --> crash (ud2) */
                    UNLIKELY_START(be, invept)
                    "\tud2\n"
                    UNLIKELY_END_SECTION
                    :
+#ifdef HAVE_GAS_EPT
+                   : "m" (operand), "r" (type)
+#else
                    : "a" (&operand), "c" (type)
+#endif
                    : "memory" );
 }
 
@@ -385,14 +437,23 @@ static inline void __invvpid(int type, u
     } __attribute__ ((packed)) operand = {vpid, 0, gva};
 
     /* Fix up #UD exceptions which occur when TLBs are flushed before VMXON. */
-    asm volatile ( "1: " INVVPID_OPCODE MODRM_EAX_08
+    asm volatile ( "1: "
+#ifdef HAVE_GAS_EPT
+                   "invvpid %0, %q1\n"
+#else
+                   INVVPID_OPCODE MODRM_EAX_08
+#endif
                    /* CF==1 or ZF==1 --> crash (ud2) */
                    "2: " UNLIKELY_START(be, invvpid)
                    "\tud2\n"
                    UNLIKELY_END_SECTION "\n"
                    _ASM_EXTABLE(1b, 2b)
                    :
+#ifdef HAVE_GAS_EPT
+                   : "m" (operand), "r" (type)
+#else
                    : "a" (&operand), "c" (type)
+#endif
                    : "memory" );
 }
 




_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel

_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel

 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.