[PATCH 2/3] x86: Use asm_inline for ALTERNATIVE() and EXTABLE

... when there really are only a few instructions in line.

GCC estimates the size of an asm() block from the number of lines in
the template string, so the directive-heavy expansions of ALTERNATIVE()
and _ASM_EXTABLE() make small functions look far larger to the inliner
than they really are.  asm_inline forces the minimum size estimate
instead, which is the accurate choice for these blocks.

In some cases, reformat to reduce left-hand margin space.

No functional change.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
CC: Stefano Stabellini <sstabellini@xxxxxxxxxx>
CC: Julien Grall <julien@xxxxxxx>
CC: Volodymyr Babchuk <Volodymyr_Babchuk@xxxxxxxx>
CC: Bertrand Marquis <bertrand.marquis@xxxxxxx>
CC: Michal Orzel <michal.orzel@xxxxxxx>

v2:
 * New, split out of previous single patch
 * Include EXTABLE

Some uses of _ASM_EXTABLE() sit in blocks with many instructions; those
are left unconverted.  There are also a couple for which I already have
pending cleanup, which I've left alone to reduce churn.
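
For reference, asm_inline wraps the `asm inline` keyword that GCC
gained in version 9, which tells the compiler to use the minimum
possible size for the statement when estimating function sizes for
inlining, rather than guessing from the number of lines in the
template.  A minimal sketch of the wrapper, assuming a feature test
named CONFIG_CC_HAS_ASM_INLINE by analogy with the
CONFIG_CC_HAS_ASM_GOTO_OUTPUT test visible in the first hunk (the real
definition lives in Xen's compiler.h):

    /* Sketch only: fall back to plain asm on compilers without the
     * keyword, keeping the usual (pessimistic) size estimate there. */
    #ifdef CONFIG_CC_HAS_ASM_INLINE
    # define asm_inline asm inline
    #else
    # define asm_inline asm
    #endif

With that in place the conversion is mechanical: asm volatile ( ... )
becomes asm_inline volatile ( ... ), and asm goto ( ... ) becomes
asm_inline goto ( ... ).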
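
As a usage sketch of the reformatted alternative() wrapper changed
below (the feature flag is Xen's synthetic SMAP flag; treat the exact
name as illustrative):

    /* Emits nothing on pre-SMAP hardware and is patched to a 3-byte
     * `stac` at boot.  The expanded template is dozens of lines of
     * directives, yet only a few bytes land in line -- exactly the
     * skewed size estimate that asm_inline corrects. */
    alternative("", "stac", X86_FEATURE_XEN_SMAP);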
---
 xen/arch/x86/cpu/amd.c                      | 52 +++++++++++----------
 xen/arch/x86/domain.c                       | 21 +++++----
 xen/arch/x86/extable.c                      | 21 +++++----
 xen/arch/x86/hvm/vmx/vmcs.c                 | 15 +++---
 xen/arch/x86/i387.c                         |  4 +-
 xen/arch/x86/include/asm/alternative-call.h |  3 +-
 xen/arch/x86/include/asm/alternative.h      | 36 ++++++++------
 xen/arch/x86/include/asm/hvm/vmx/vmx.h      | 15 +++---
 xen/arch/x86/include/asm/uaccess.h          |  4 +-
 xen/arch/x86/pv/misc-hypercalls.c           | 19 ++++----
 xen/arch/x86/traps.c                        | 48 ++++++++++---------
 xen/arch/x86/usercopy.c                     |  6 +--
 12 files changed, 132 insertions(+), 112 deletions(-)

diff --git a/xen/arch/x86/cpu/amd.c b/xen/arch/x86/cpu/amd.c
index 37d67dd15c89..27ae16780857 100644
--- a/xen/arch/x86/cpu/amd.c
+++ b/xen/arch/x86/cpu/amd.c
@@ -60,41 +60,45 @@ static inline int rdmsr_amd_safe(unsigned int msr, unsigned int *lo,
                                 unsigned int *hi)
 {
 #ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
-    asm goto ( "1: rdmsr\n\t"
-               _ASM_EXTABLE(1b, %l[fault])
-               : "=a" (*lo), "=d" (*hi)
-               : "c" (msr), "D" (0x9c5a203a)
-               :
-               : fault );
+    asm_inline goto (
+        "1: rdmsr\n\t"
+        _ASM_EXTABLE(1b, %l[fault])
+        : "=a" (*lo), "=d" (*hi)
+        : "c" (msr), "D" (0x9c5a203a)
+        :
+        : fault );
+
     return 0;
 
  fault:
     return -EFAULT;
 #else
-       int err;
-
-       asm volatile("1: rdmsr\n2:\n"
-                    ".section .fixup,\"ax\"\n"
-                    "3: movl %6,%2\n"
-                    "   jmp 2b\n"
-                    ".previous\n"
-                    _ASM_EXTABLE(1b, 3b)
-                    : "=a" (*lo), "=d" (*hi), "=r" (err)
-                    : "c" (msr), "D" (0x9c5a203a), "2" (0), "i" (-EFAULT));
-
-       return err;
+    int err;
+
+    asm_inline volatile (
+        "1: rdmsr\n2:\n"
+        ".section .fixup,\"ax\"\n"
+        "3: movl %6,%2\n"
+        "   jmp 2b\n"
+        ".previous\n"
+        _ASM_EXTABLE(1b, 3b)
+        : "=a" (*lo), "=d" (*hi), "=r" (err)
+        : "c" (msr), "D" (0x9c5a203a), "2" (0), "i" (-EFAULT) );
+
+    return err;
 #endif
 }
 
 static inline int wrmsr_amd_safe(unsigned int msr, unsigned int lo,
                                  unsigned int hi)
 {
-    asm goto ( "1: wrmsr\n\t"
-               _ASM_EXTABLE(1b, %l[fault])
-               :
-               : "c" (msr), "a" (lo), "d" (hi), "D" (0x9c5a203a)
-               :
-               : fault );
+    asm_inline goto (
+        "1: wrmsr\n\t"
+        _ASM_EXTABLE(1b, %l[fault])
+        :
+        : "c" (msr), "a" (lo), "d" (hi), "D" (0x9c5a203a)
+        :
+        : fault );
 
     return 0;
 
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index f197dad4c0cd..7536b6c8717e 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1706,16 +1706,17 @@ static void load_segments(struct vcpu *n)
      * @all_segs_okay in function scope, and load NUL into @sel.
      */
 #define TRY_LOAD_SEG(seg, val)                          \
-    asm volatile ( "1: mov %k[_val], %%" #seg "\n\t"    \
-                   "2:\n\t"                             \
-                   ".section .fixup, \"ax\"\n\t"        \
-                   "3: xor %k[ok], %k[ok]\n\t"          \
-                   "   mov %k[ok], %%" #seg "\n\t"      \
-                   "   jmp 2b\n\t"                      \
-                   ".previous\n\t"                      \
-                   _ASM_EXTABLE(1b, 3b)                 \
-                   : [ok] "+r" (all_segs_okay)          \
-                   : [_val] "rm" (val) )
+    asm_inline volatile (                               \
+        "1: mov %k[_val], %%" #seg "\n\t"               \
+        "2:\n\t"                                        \
+        ".section .fixup, \"ax\"\n\t"                   \
+        "3: xor %k[ok], %k[ok]\n\t"                     \
+        "   mov %k[ok], %%" #seg "\n\t"                 \
+        "   jmp 2b\n\t"                                 \
+        ".previous\n\t"                                 \
+        _ASM_EXTABLE(1b, 3b)                            \
+        : [ok] "+r" (all_segs_okay)                     \
+        : [_val] "rm" (val) )
 
     if ( !compat )
     {
diff --git a/xen/arch/x86/extable.c b/xen/arch/x86/extable.c
index 1572efa69a00..de392024527c 100644
--- a/xen/arch/x86/extable.c
+++ b/xen/arch/x86/extable.c
@@ -186,16 +186,17 @@ int __init cf_check stub_selftest(void)
         place_ret(ptr + ARRAY_SIZE(tests[i].opc));
         unmap_domain_page(ptr);
 
-        asm volatile ( "INDIRECT_CALL %[stb]\n"
-                       ".Lret%=:\n\t"
-                       ".pushsection .fixup,\"ax\"\n"
-                       ".Lfix%=:\n\t"
-                       "pop %[exn]\n\t"
-                       "jmp .Lret%=\n\t"
-                       ".popsection\n\t"
-                       _ASM_EXTABLE(.Lret%=, .Lfix%=)
-                       : [exn] "+m" (res) ASM_CALL_CONSTRAINT
-                       : [stb] "r" (addr), "a" (tests[i].rax));
+        asm_inline volatile (
+            "INDIRECT_CALL %[stb]\n"
+            ".Lret%=:\n\t"
+            ".pushsection .fixup,\"ax\"\n"
+            ".Lfix%=:\n\t"
+            "pop %[exn]\n\t"
+            "jmp .Lret%=\n\t"
+            ".popsection\n\t"
+            _ASM_EXTABLE(.Lret%=, .Lfix%=)
+            : [exn] "+m" (res) ASM_CALL_CONSTRAINT
+            : [stb] "r" (addr), "a" (tests[i].rax) );
 
         if ( res.raw != tests[i].res.raw )
         {
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index a44475ae15bd..59f4d1d86f02 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -749,13 +749,14 @@ static int _vmx_cpu_up(bool bsp)
     if ( bsp && (rc = vmx_cpu_up_prepare(cpu)) != 0 )
         return rc;
 
-    asm goto ( "1: vmxon %[addr]\n\t"
-               "   jbe %l[vmxon_fail]\n\t"
-               _ASM_EXTABLE(1b, %l[vmxon_fault])
-               :
-               : [addr] "m" (this_cpu(vmxon_region))
-               : "memory"
-               : vmxon_fail, vmxon_fault );
+    asm_inline goto (
+        "1: vmxon %[addr]\n\t"
+        "   jbe %l[vmxon_fail]\n\t"
+        _ASM_EXTABLE(1b, %l[vmxon_fault])
+        :
+        : [addr] "m" (this_cpu(vmxon_region))
+        : "memory"
+        : vmxon_fail, vmxon_fault );
 
     this_cpu(vmxon) = 1;
 
diff --git a/xen/arch/x86/i387.c b/xen/arch/x86/i387.c
index 5429531ddd5f..b84cd6f7a9e1 100644
--- a/xen/arch/x86/i387.c
+++ b/xen/arch/x86/i387.c
@@ -62,7 +62,7 @@ static inline void fpu_fxrstor(struct vcpu *v)
     switch ( __builtin_expect(fpu_ctxt->x[FPU_WORD_SIZE_OFFSET], 8) )
     {
     default:
-        asm volatile (
+        asm_inline volatile (
             "1: fxrstorq %0\n"
             ".section .fixup,\"ax\"   \n"
             "2: push %%"__OP"ax       \n"
@@ -82,7 +82,7 @@ static inline void fpu_fxrstor(struct vcpu *v)
             : "m" (*fpu_ctxt), "i" (sizeof(*fpu_ctxt) / 4) );
         break;
     case 4: case 2:
-        asm volatile (
+        asm_inline volatile (
             "1: fxrstor %0         \n"
             ".section .fixup,\"ax\"\n"
             "2: push %%"__OP"ax    \n"
diff --git a/xen/arch/x86/include/asm/alternative-call.h b/xen/arch/x86/include/asm/alternative-call.h
index bbc49a5274d9..b22c10c32283 100644
--- a/xen/arch/x86/include/asm/alternative-call.h
+++ b/xen/arch/x86/include/asm/alternative-call.h
@@ -87,7 +87,8 @@ struct alt_call {
     rettype ret_;                                                  \
     register unsigned long r10_ asm("r10");                        \
     register unsigned long r11_ asm("r11");                        \
-    asm volatile ("1: call *%c[addr](%%rip)\n\t"                   \
+    asm_inline volatile (                                          \
+                  "1: call *%c[addr](%%rip)\n\t"                   \
                   ".pushsection .alt_call_sites, \"a\", @progbits\n\t"  \
                   ".long 1b - .\n\t"                               \
                   ".popsection"                                    \
diff --git a/xen/arch/x86/include/asm/alternative.h b/xen/arch/x86/include/asm/alternative.h
index e17be8ddfd82..0482bbf7cbf1 100644
--- a/xen/arch/x86/include/asm/alternative.h
+++ b/xen/arch/x86/include/asm/alternative.h
@@ -126,12 +126,15 @@ extern void alternative_instructions(void);
  * without volatile and memory clobber.
  */
 #define alternative(oldinstr, newinstr, feature)                        \
-        asm volatile (ALTERNATIVE(oldinstr, newinstr, feature) : : : "memory")
+    asm_inline volatile (                                               \
+        ALTERNATIVE(oldinstr, newinstr, feature)                        \
+        ::: "memory" )
 
 #define alternative_2(oldinstr, newinstr1, feature1, newinstr2, feature2) \
-       asm volatile (ALTERNATIVE_2(oldinstr, newinstr1, feature1,      \
-                                   newinstr2, feature2)                \
-                     : : : "memory")
+    asm_inline volatile (                                               \
+        ALTERNATIVE_2(oldinstr, newinstr1, feature1,                    \
+                      newinstr2, feature2)                              \
+        ::: "memory" )
 
 /*
  * Alternative inline assembly with input.
@@ -143,14 +146,16 @@ extern void alternative_instructions(void);
  * If you use variable sized constraints like "m" or "g" in the
  * replacement make sure to pad to the worst case length.
  */
-#define alternative_input(oldinstr, newinstr, feature, input...)       \
-       asm volatile (ALTERNATIVE(oldinstr, newinstr, feature)          \
-                     : : input)
+#define alternative_input(oldinstr, newinstr, feature, input...)        \
+    asm_inline volatile (                                               \
+        ALTERNATIVE(oldinstr, newinstr, feature)                        \
+        :: input )
 
 /* Like alternative_input, but with a single output argument */
-#define alternative_io(oldinstr, newinstr, feature, output, input...)  \
-       asm volatile (ALTERNATIVE(oldinstr, newinstr, feature)          \
-                     : output : input)
+#define alternative_io(oldinstr, newinstr, feature, output, input...)   \
+    asm_inline volatile (                                               \
+        ALTERNATIVE(oldinstr, newinstr, feature)                        \
+        : output : input )
 
 /*
  * This is similar to alternative_io. But it has two features and
@@ -160,11 +165,12 @@ extern void alternative_instructions(void);
  * Otherwise, if CPU has feature1, newinstr1 is used.
  * Otherwise, oldinstr is used.
  */
-#define alternative_io_2(oldinstr, newinstr1, feature1, newinstr2,     \
-                        feature2, output, input...)                    \
-       asm volatile(ALTERNATIVE_2(oldinstr, newinstr1, feature1,       \
-                                  newinstr2, feature2)                 \
-                    : output : input)
+#define alternative_io_2(oldinstr, newinstr1, feature1, newinstr2,      \
+                         feature2, output, input...)                    \
+    asm_inline volatile (                                               \
+        ALTERNATIVE_2(oldinstr, newinstr1, feature1,                    \
+                      newinstr2, feature2)                              \
+        : output : input )
 
 /* Use this macro(s) if you need more than one output parameter. */
 #define ASM_OUTPUT2(a...) a
diff --git a/xen/arch/x86/include/asm/hvm/vmx/vmx.h b/xen/arch/x86/include/asm/hvm/vmx/vmx.h
index d85b52b9d522..56bea252cc5a 100644
--- a/xen/arch/x86/include/asm/hvm/vmx/vmx.h
+++ b/xen/arch/x86/include/asm/hvm/vmx/vmx.h
@@ -431,13 +431,14 @@ static always_inline void __invvpid(unsigned long type, u16 vpid, u64 gva)
     }  operand = {vpid, 0, gva};
 
     /* Fix up #UD exceptions which occur when TLBs are flushed before VMXON. */
-    asm goto ( "1: invvpid %[operand], %[type]\n\t"
-               "   jbe %l[vmfail]\n\t"
-               "2:" _ASM_EXTABLE(1b, 2b)
-               :
-               : [operand] "m" (operand), [type] "r" (type)
-               : "memory"
-               : vmfail );
+    asm_inline goto (
+        "1: invvpid %[operand], %[type]\n\t"
+        "   jbe %l[vmfail]\n\t"
+        "2:" _ASM_EXTABLE(1b, 2b)
+        :
+        : [operand] "m" (operand), [type] "r" (type)
+        : "memory"
+        : vmfail );
     return;
 
  vmfail:
diff --git a/xen/arch/x86/include/asm/uaccess.h b/xen/arch/x86/include/asm/uaccess.h
index 2d01669b9610..719d053936b9 100644
--- a/xen/arch/x86/include/asm/uaccess.h
+++ b/xen/arch/x86/include/asm/uaccess.h
@@ -154,7 +154,7 @@ struct __large_struct { unsigned long buf[100]; };
  * aliasing issues.
  */
 #define put_unsafe_asm(x, addr, GUARD, err, itype, rtype, ltype, errret) \
-       __asm__ __volatile__(                                           \
+       asm_inline volatile (                                           \
                GUARD(                                                  \
                "       guest_access_mask_ptr %[ptr], %[scr1], %[scr2]\n" \
                )                                                       \
@@ -171,7 +171,7 @@ struct __large_struct { unsigned long buf[100]; };
                  "[ptr]" (addr), [errno] "i" (errret))
 
 #define get_unsafe_asm(x, addr, GUARD, err, rtype, ltype, errret)      \
-       __asm__ __volatile__(                                           \
+       asm_inline volatile (                                           \
                GUARD(                                                  \
                "       guest_access_mask_ptr %[ptr], %[scr1], %[scr2]\n" \
                )                                                       \
diff --git a/xen/arch/x86/pv/misc-hypercalls.c b/xen/arch/x86/pv/misc-hypercalls.c
index b529f00ea127..17030d800d1b 100644
--- a/xen/arch/x86/pv/misc-hypercalls.c
+++ b/xen/arch/x86/pv/misc-hypercalls.c
@@ -230,15 +230,16 @@ long do_set_segment_base(unsigned int which, unsigned long base)
          * Anyone wanting to check for errors from this hypercall should
          * re-read %gs and compare against the input.
          */
-        asm volatile ( "1: mov %[sel], %%gs\n\t"
-                       ".section .fixup, \"ax\", @progbits\n\t"
-                       "2: mov %k[flat], %%gs\n\t"
-                       "   xor %[sel], %[sel]\n\t"
-                       "   jmp 1b\n\t"
-                       ".previous\n\t"
-                       _ASM_EXTABLE(1b, 2b)
-                       : [sel] "+r" (sel)
-                       : [flat] "r" (FLAT_USER_DS32) );
+        asm_inline volatile (
+            "1: mov %[sel], %%gs\n\t"
+            ".section .fixup, \"ax\", @progbits\n\t"
+            "2: mov %k[flat], %%gs\n\t"
+            "   xor %[sel], %[sel]\n\t"
+            "   jmp 1b\n\t"
+            ".previous\n\t"
+            _ASM_EXTABLE(1b, 2b)
+            : [sel] "+r" (sel)
+            : [flat] "r" (FLAT_USER_DS32) );
 
         /* Update the cache of the inactive base, as read from the GDT/LDT. */
         v->arch.pv.gs_base_user = read_gs_base();
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 25e0d5777e6e..c94779b4ad4f 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -126,27 +126,29 @@ void show_code(const struct cpu_user_regs *regs)
      * Copy forward from regs->rip.  In the case of a fault, %ecx contains the
      * number of bytes remaining to copy.
      */
-    asm volatile ("1: rep movsb; 2:"
-                  _ASM_EXTABLE(1b, 2b)
-                  : "=&c" (missing_after),
-                    "=&D" (tmp), "=&S" (tmp)
-                  : "0" (ARRAY_SIZE(insns_after)),
-                    "1" (insns_after),
-                    "2" (regs->rip));
+    asm_inline volatile (
+        "1: rep movsb; 2:"
+        _ASM_EXTABLE(1b, 2b)
+        : "=&c" (missing_after),
+          "=&D" (tmp), "=&S" (tmp)
+        : "0" (ARRAY_SIZE(insns_after)),
+          "1" (insns_after),
+          "2" (regs->rip) );
 
     /*
      * Copy backwards from regs->rip - 1.  In the case of a fault, %ecx
      * contains the number of bytes remaining to copy.
      */
-    asm volatile ("std;"
-                  "1: rep movsb;"
-                  "2: cld;"
-                  _ASM_EXTABLE(1b, 2b)
-                  : "=&c" (missing_before),
-                    "=&D" (tmp), "=&S" (tmp)
-                  : "0" (ARRAY_SIZE(insns_before)),
-                    "1" (insns_before + ARRAY_SIZE(insns_before) - 1),
-                    "2" (regs->rip - 1));
+    asm_inline volatile (
+        "std;"
+        "1: rep movsb;"
+        "2: cld;"
+        _ASM_EXTABLE(1b, 2b)
+        : "=&c" (missing_before),
+          "=&D" (tmp), "=&S" (tmp)
+        : "0" (ARRAY_SIZE(insns_before)),
+          "1" (insns_before + ARRAY_SIZE(insns_before) - 1),
+          "2" (regs->rip - 1) );
     clac();
 
     printk("Xen code around <%p> (%ps)%s:\n",
@@ -524,12 +526,14 @@ static void show_trace(const struct cpu_user_regs *regs)
     printk("Xen call trace:\n");
 
     /* Guarded read of the stack top. */
-    asm ( "1: mov %[data], %[tos]; 2:\n"
-          ".pushsection .fixup,\"ax\"\n"
-          "3: movb $1, %[fault]; jmp 2b\n"
-          ".popsection\n"
-          _ASM_EXTABLE(1b, 3b)
-          : [tos] "+r" (tos), [fault] "+qm" (fault) : [data] "m" (*sp) );
+    asm_inline (
+        "1: mov %[data], %[tos]; 2:\n"
+        ".pushsection .fixup,\"ax\"\n"
+        "3: movb $1, %[fault]; jmp 2b\n"
+        ".popsection\n"
+        _ASM_EXTABLE(1b, 3b)
+        : [tos] "+r" (tos), [fault] "+qm" (fault)
+        : [data] "m" (*sp) );
 
     /*
      * If RIP looks sensible, or the top of the stack doesn't, print RIP at
diff --git a/xen/arch/x86/usercopy.c b/xen/arch/x86/usercopy.c
index 7ab2009efe4c..a24b52cc66c1 100644
--- a/xen/arch/x86/usercopy.c
+++ b/xen/arch/x86/usercopy.c
@@ -19,7 +19,7 @@ unsigned int copy_to_guest_ll(void __user *to, const void *from, unsigned int n)
     GUARD(unsigned dummy);
 
     stac();
-    asm volatile (
+    asm_inline volatile (
         GUARD(
         "    guest_access_mask_ptr %[to], %q[scratch1], %q[scratch2]\n"
         )
@@ -39,7 +39,7 @@ unsigned int copy_from_guest_ll(void *to, const void __user *from, unsigned int
     unsigned dummy;
 
     stac();
-    asm volatile (
+    asm_inline volatile (
         GUARD(
         "    guest_access_mask_ptr %[from], %q[scratch1], %q[scratch2]\n"
         )
@@ -101,7 +101,7 @@ unsigned int clear_guest_pv(void __user *to, unsigned int n)
         long dummy;
 
         stac();
-        asm volatile (
+        asm_inline volatile (
             "    guest_access_mask_ptr %[to], %[scratch1], %[scratch2]\n"
             "1:  rep stosb\n"
             "2:\n"
-- 
2.39.5