[xen staging] x86/asm: Remove semicolon from LOCK prefix



commit f5aee09423fe010cf9c072cfff85710be462abef
Author:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
AuthorDate: Fri Feb 28 21:50:01 2025 +0000
Commit:     Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
CommitDate: Tue Mar 4 12:53:15 2025 +0000

    x86/asm: Remove semicolon from LOCK prefix
    
    Most of Xen's LOCK prefixes already omit the semicolon, but a few
    instances still remain in the tree.
    
    As noted in the Linux patch, this adversely affects size/inlining decisions,
    and prevents the assembler from diagnosing certain classes of error.
    
    No functional change.
    
    Link: https://lore.kernel.org/lkml/20250228085149.2478245-1-ubizjak@xxxxxxxxx/
    Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
    Acked-by: Roger Pau Monné <roger.pau@xxxxxxxxxx>
---
 xen/arch/x86/include/asm/atomic.h   | 16 ++++++++--------
 xen/arch/x86/include/asm/bitops.h   | 12 ++++++------
 xen/arch/x86/include/asm/spinlock.h |  2 +-
 3 files changed, 15 insertions(+), 15 deletions(-)
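
For illustration (not part of the patch): in GNU as, `;' ends an assembler
statement, so in "lock; addl" the LOCK prefix is a statement of its own and
the assembler never checks whether the instruction that follows is actually
lockable.  A minimal sketch of the class of error this can hide, assuming
GCC/GAS (the demo function below is hypothetical, not from the Xen tree):

    /* Hypothetical demo only -- MOV is not a lockable instruction. */
    static inline void demo_lock_mov(int *p)
    {
        /*
         * Assembles without any diagnostic: the ';' splits this into two
         * statements, so GAS emits a bare 0xF0 LOCK prefix and never sees
         * that it is applied to a non-lockable MOV.  Executing this would
         * raise #UD at runtime.
         */
        asm volatile ( "lock; movl %1, %0" : "=m" (*p) : "r" (1) );

        /*
         * With the prefix attached to the same statement, GAS rejects the
         * mistake at build time ("expecting lockable instruction after
         * `lock'"), which is why the line below must stay commented out:
         */
        /* asm volatile ( "lock movl %1, %0" : "=m" (*p) : "r" (1) ); */
    }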

diff --git a/xen/arch/x86/include/asm/atomic.h b/xen/arch/x86/include/asm/atomic.h
index 16bd0ebfd7..ed4e09a503 100644
--- a/xen/arch/x86/include/asm/atomic.h
+++ b/xen/arch/x86/include/asm/atomic.h
@@ -115,7 +115,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 static inline void atomic_add(int i, atomic_t *v)
 {
     asm volatile (
-        "lock; addl %1,%0"
+        "lock addl %1,%0"
         : "=m" (*(volatile int *)&v->counter)
         : "ir" (i), "m" (*(volatile int *)&v->counter) );
 }
@@ -128,7 +128,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
 static inline void atomic_sub(int i, atomic_t *v)
 {
     asm volatile (
-        "lock; subl %1,%0"
+        "lock subl %1,%0"
         : "=m" (*(volatile int *)&v->counter)
         : "ir" (i), "m" (*(volatile int *)&v->counter) );
 }
@@ -142,7 +142,7 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
 {
     bool c;
 
-    asm volatile ( "lock; subl %[i], %[counter]\n\t"
+    asm volatile ( "lock subl %[i], %[counter]\n\t"
                    ASM_FLAG_OUT(, "setz %[zf]\n\t")
                    : [counter] "+m" (*(volatile int *)&v->counter),
                      [zf] ASM_FLAG_OUT("=@ccz", "=qm") (c)
@@ -154,7 +154,7 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
 static inline void atomic_inc(atomic_t *v)
 {
     asm volatile (
-        "lock; incl %0"
+        "lock incl %0"
         : "=m" (*(volatile int *)&v->counter)
         : "m" (*(volatile int *)&v->counter) );
 }
@@ -168,7 +168,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
 {
     bool c;
 
-    asm volatile ( "lock; incl %[counter]\n\t"
+    asm volatile ( "lock incl %[counter]\n\t"
                    ASM_FLAG_OUT(, "setz %[zf]\n\t")
                    : [counter] "+m" (*(volatile int *)&v->counter),
                      [zf] ASM_FLAG_OUT("=@ccz", "=qm") (c)
@@ -180,7 +180,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
 static inline void atomic_dec(atomic_t *v)
 {
     asm volatile (
-        "lock; decl %0"
+        "lock decl %0"
         : "=m" (*(volatile int *)&v->counter)
         : "m" (*(volatile int *)&v->counter) );
 }
@@ -194,7 +194,7 @@ static inline int atomic_dec_and_test(atomic_t *v)
 {
     bool c;
 
-    asm volatile ( "lock; decl %[counter]\n\t"
+    asm volatile ( "lock decl %[counter]\n\t"
                    ASM_FLAG_OUT(, "setz %[zf]\n\t")
                    : [counter] "+m" (*(volatile int *)&v->counter),
                      [zf] ASM_FLAG_OUT("=@ccz", "=qm") (c)
@@ -207,7 +207,7 @@ static inline int atomic_add_negative(int i, atomic_t *v)
 {
     bool c;
 
-    asm volatile ( "lock; addl %[i], %[counter]\n\t"
+    asm volatile ( "lock addl %[i], %[counter]\n\t"
                    ASM_FLAG_OUT(, "sets %[sf]\n\t")
                    : [counter] "+m" (*(volatile int *)&v->counter),
                      [sf] ASM_FLAG_OUT("=@ccs", "=qm") (c)
diff --git a/xen/arch/x86/include/asm/bitops.h b/xen/arch/x86/include/asm/bitops.h
index 39e37f1cbe..bb9d756460 100644
--- a/xen/arch/x86/include/asm/bitops.h
+++ b/xen/arch/x86/include/asm/bitops.h
@@ -32,7 +32,7 @@
  */
 static inline void set_bit(int nr, volatile void *addr)
 {
-    asm volatile ( "lock; btsl %1,%0"
+    asm volatile ( "lock btsl %1,%0"
                    : "+m" (ADDR) : "Ir" (nr) : "memory");
 }
 #define set_bit(nr, addr) ({                            \
@@ -73,7 +73,7 @@ static inline void constant_set_bit(int nr, void *addr)
  */
 static inline void clear_bit(int nr, volatile void *addr)
 {
-    asm volatile ( "lock; btrl %1,%0"
+    asm volatile ( "lock btrl %1,%0"
                    : "+m" (ADDR) : "Ir" (nr) : "memory");
 }
 #define clear_bit(nr, addr) ({                          \
@@ -140,7 +140,7 @@ static inline void constant_change_bit(int nr, void *addr)
  */
 static inline void change_bit(int nr, volatile void *addr)
 {
-    asm volatile ( "lock; btcl %1,%0"
+    asm volatile ( "lock btcl %1,%0"
                     : "+m" (ADDR) : "Ir" (nr) : "memory");
 }
 #define change_bit(nr, addr) ({                         \
@@ -160,7 +160,7 @@ static inline int test_and_set_bit(int nr, volatile void *addr)
 {
     int oldbit;
 
-    asm volatile ( "lock; btsl %[nr], %[addr]\n\t"
+    asm volatile ( "lock btsl %[nr], %[addr]\n\t"
                    ASM_FLAG_OUT(, "sbbl %[old], %[old]\n\t")
                    : [old] ASM_FLAG_OUT("=@ccc", "=r") (oldbit),
                      [addr] "+m" (ADDR) : [nr] "Ir" (nr) : "memory" );
@@ -206,7 +206,7 @@ static inline int test_and_clear_bit(int nr, volatile void *addr)
 {
     int oldbit;
 
-    asm volatile ( "lock; btrl %[nr], %[addr]\n\t"
+    asm volatile ( "lock btrl %[nr], %[addr]\n\t"
                    ASM_FLAG_OUT(, "sbbl %[old], %[old]\n\t")
                    : [old] ASM_FLAG_OUT("=@ccc", "=r") (oldbit),
                      [addr] "+m" (ADDR) : [nr] "Ir" (nr) : "memory" );
@@ -266,7 +266,7 @@ static inline int test_and_change_bit(int nr, volatile void *addr)
 {
     int oldbit;
 
-    asm volatile ( "lock; btcl %[nr], %[addr]\n\t"
+    asm volatile ( "lock btcl %[nr], %[addr]\n\t"
                    ASM_FLAG_OUT(, "sbbl %[old], %[old]\n\t")
                    : [old] ASM_FLAG_OUT("=@ccc", "=r") (oldbit),
                      [addr] "+m" (ADDR) : [nr] "Ir" (nr) : "memory" );
diff --git a/xen/arch/x86/include/asm/spinlock.h b/xen/arch/x86/include/asm/spinlock.h
index 56f6095752..834e8c580e 100644
--- a/xen/arch/x86/include/asm/spinlock.h
+++ b/xen/arch/x86/include/asm/spinlock.h
@@ -3,7 +3,7 @@
 
 #define _raw_read_unlock(l) \
     BUILD_BUG_ON(sizeof((l)->lock) != 4); /* Clang doesn't support %z in asm. */ \
-    asm volatile ( "lock; decl %0" : "+m" ((l)->lock) :: "memory" )
+    asm volatile ( "lock decl %0" : "+m" ((l)->lock) :: "memory" )
 
 /*
  * On x86 the only reordering is of reads with older writes.  In the
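
On the size/inlining point: GCC's inlining heuristics estimate the size of
an asm statement by counting the `;'/newline-separated instructions in its
template, so the old form is costed as two instructions even though it
assembles to a single locked instruction.  A minimal before/after sketch,
assuming GCC (the function names are hypothetical, not from the Xen tree):

    /*
     * Functionally identical, byte-for-byte identical output, but the
     * inliner costs them differently: the ';' in the first template makes
     * GCC count it as two instructions, the second as one.
     */
    static inline void read_unlock_old(int *lock)
    {
        asm volatile ( "lock; decl %0" : "+m" (*lock) :: "memory" );
    }

    static inline void read_unlock_new(int *lock)
    {
        asm volatile ( "lock decl %0" : "+m" (*lock) :: "memory" );
    }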
--
generated by git-patchbot for /home/xen/git/xen.git#staging