[Xen-devel] [PATCH] x86/atomic: Improvements and simplifications to assembly constraints
* Some of the single-byte versions specify "=q" as the output. AFAICT, there
was not a legitimate reason to restrict the use of %esi/%edi in the 32-bit
build. Either way, in 64-bit, it is equivalent to "=r".
* Constraints in the form "=r" (x) : "0" (x) can be folded to just "+r" (x)
* Switch to using named parameters (mostly for legibility) which in
particular helps with...
* __xchg(), __cmpxchg() and __cmpxchg_user() modify their memory operand, so
must list it as an output operand. This only works because they each have
a memory clobber to give the construct full compiler-barrier properties.
* Every memory operand has an explicit known size. Letting the compiler see
the real size rather than obscuring it with __xg() allows for the removal
of the instruction size suffixes without introducing ambiguity.
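For illustration only (not part of the patch): a minimal standalone sketch of the
same transformation applied to a single-byte exchange.  The helper names
old_xchg_u8()/new_xchg_u8() are invented for this example.

#include <stdint.h>

static inline uint8_t old_xchg_u8(volatile void *ptr, uint8_t x)
{
    /* Old style: separate output tied to an input with "0", an explicit
     * size suffix on the mnemonic, and the memory operand's real size
     * hidden from the compiler behind a cast. */
    asm volatile ( "xchgb %b0,%1"
                   : "=q" (x)
                   : "m" (*(volatile uint8_t *)ptr), "0" (x)
                   : "memory" );
    return x;
}

static inline uint8_t new_xchg_u8(volatile void *ptr, uint8_t x)
{
    /* New style: named read-write operands ("+r"/"+m"), an explicitly
     * sized memory operand, and no instruction suffix needed.  "+r" rather
     * than "+q" is fine for 64-bit, where every GPR has a byte form. */
    asm volatile ( "xchg %b[x], %[ptr]"
                   : [x] "+r" (x), [ptr] "+m" (*(volatile uint8_t *)ptr)
                   :: "memory" );
    return x;
}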
Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Wei Liu <wei.liu2@xxxxxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
Interestingly, switching to using output memory operands causes the following
perturbation in the build:
add/remove: 0/0 grow/shrink: 3/5 up/down: 70/-124 (-54)
Function                                     old     new   delta
do_mmu_update                               7041    7101     +60
mctelem_process_deferred                     234     242      +8
cpufreq_governor_dbs                         851     853      +2
_set_status                                  162     161      -1
create_irq                                   325     323      -2
do_tmem_put                                 2066    2062      -4
task_switch_load_seg                         892     884      -8
_get_page_type                              6057    5948    -109
but as far as I can tell, it is exclusively down to different register
scheduling choices.
---
xen/include/asm-x86/system.h | 99 +++++++++++++++++--------------------
xen/include/asm-x86/x86_64/system.h | 24 ++++-----
2 files changed, 57 insertions(+), 66 deletions(-)
diff --git a/xen/include/asm-x86/system.h b/xen/include/asm-x86/system.h
index 483cd20..8764e31 100644
--- a/xen/include/asm-x86/system.h
+++ b/xen/include/asm-x86/system.h
@@ -23,9 +23,6 @@
#define xchg(ptr,v) \
((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
-struct __xchg_dummy { unsigned long a[100]; };
-#define __xg(x) ((volatile struct __xchg_dummy *)(x))
-
#include <asm/x86_64/system.h>
/*
@@ -39,28 +36,24 @@ static always_inline unsigned long __xchg(
switch ( size )
{
case 1:
- asm volatile ( "xchgb %b0,%1"
- : "=q" (x)
- : "m" (*__xg(ptr)), "0" (x)
- : "memory" );
+ asm volatile ( "xchg %b[x], %[ptr]"
+ : [x] "+r" (x), [ptr] "+m" (*(uint8_t *)ptr)
+ :: "memory" );
break;
case 2:
- asm volatile ( "xchgw %w0,%1"
- : "=r" (x)
- : "m" (*__xg(ptr)), "0" (x)
- : "memory" );
+ asm volatile ( "xchg %w[x], %[ptr]"
+ : [x] "+r" (x), [ptr] "+m" (*(uint16_t *)ptr)
+ :: "memory" );
break;
case 4:
- asm volatile ( "xchgl %k0,%1"
- : "=r" (x)
- : "m" (*__xg(ptr)), "0" (x)
- : "memory" );
+ asm volatile ( "xchg %k[x], %[ptr]"
+ : [x] "+r" (x), [ptr] "+m" (*(uint32_t *)ptr)
+ :: "memory" );
break;
case 8:
- asm volatile ( "xchgq %0,%1"
- : "=r" (x)
- : "m" (*__xg(ptr)), "0" (x)
- : "memory" );
+ asm volatile ( "xchg %q[x], %[ptr]"
+ : [x] "+r" (x), [ptr] "+m" (*(uint64_t *)ptr)
+ :: "memory" );
break;
}
return x;
@@ -79,31 +72,27 @@ static always_inline unsigned long __cmpxchg(
switch ( size )
{
case 1:
- asm volatile ( "lock; cmpxchgb %b1,%2"
- : "=a" (prev)
- : "q" (new), "m" (*__xg(ptr)),
- "0" (old)
+ asm volatile ( "lock; cmpxchg %b[new], %[ptr]"
+ : "=a" (prev), [ptr] "+m" (*(uint8_t *)ptr)
+ : [new] "r" (new), "0" (old)
: "memory" );
return prev;
case 2:
- asm volatile ( "lock; cmpxchgw %w1,%2"
- : "=a" (prev)
- : "r" (new), "m" (*__xg(ptr)),
- "0" (old)
+ asm volatile ( "lock; cmpxchg %w[new], %[ptr]"
+ : "=a" (prev), [ptr] "+m" (*(uint16_t *)ptr)
+ : [new] "r" (new), "0" (old)
: "memory" );
return prev;
case 4:
- asm volatile ( "lock; cmpxchgl %k1,%2"
- : "=a" (prev)
- : "r" (new), "m" (*__xg(ptr)),
- "0" (old)
+ asm volatile ( "lock; cmpxchg %k[new], %[ptr]"
+ : "=a" (prev), [ptr] "+m" (*(uint32_t *)ptr)
+ : [new] "r" (new), "0" (old)
: "memory" );
return prev;
case 8:
- asm volatile ( "lock; cmpxchgq %1,%2"
- : "=a" (prev)
- : "r" (new), "m" (*__xg(ptr)),
- "0" (old)
+ asm volatile ( "lock; cmpxchg %q[new], %[ptr]"
+ : "=a" (prev), [ptr] "+m" (*(uint64_t *)ptr)
+ : [new] "r" (new), "0" (old)
: "memory" );
return prev;
}
@@ -118,24 +107,24 @@ static always_inline unsigned long cmpxchg_local_(
switch ( size )
{
case 1:
- asm volatile ( "cmpxchgb %b2, %1"
- : "=a" (prev), "+m" (*(uint8_t *)ptr)
- : "q" (new), "0" (old) );
+ asm volatile ( "cmpxchg %b[new], %[ptr]"
+ : "=a" (prev), [ptr] "+m" (*(uint8_t *)ptr)
+ : [new] "r" (new), "0" (old) );
break;
case 2:
- asm volatile ( "cmpxchgw %w2, %1"
- : "=a" (prev), "+m" (*(uint16_t *)ptr)
- : "r" (new), "0" (old) );
+ asm volatile ( "cmpxchg %w[new], %[ptr]"
+ : "=a" (prev), [ptr] "+m" (*(uint16_t *)ptr)
+ : [new] "r" (new), "0" (old) );
break;
case 4:
- asm volatile ( "cmpxchgl %k2, %1"
- : "=a" (prev), "+m" (*(uint32_t *)ptr)
- : "r" (new), "0" (old) );
+ asm volatile ( "cmpxchg %k[new], %[ptr]"
+ : "=a" (prev), [ptr] "+m" (*(uint32_t *)ptr)
+ : [new] "r" (new), "0" (old) );
break;
case 8:
- asm volatile ( "cmpxchgq %2, %1"
- : "=a" (prev), "+m" (*(uint64_t *)ptr)
- : "r" (new), "0" (old) );
+ asm volatile ( "cmpxchg %q[new], %[ptr]"
+ : "=a" (prev), [ptr] "+m" (*(uint64_t *)ptr)
+ : [new] "r" (new), "0" (old) );
break;
}
@@ -161,23 +150,23 @@ static always_inline unsigned long __xadd(
switch ( size )
{
case 1:
- asm volatile ( "lock; xaddb %b0,%1"
- : "+r" (v), "+m" (*__xg(ptr))
+ asm volatile ( "lock; xadd %b[v], %[ptr]"
+ : [v] "+r" (v), [ptr] "+m" (*(uint8_t *)ptr)
:: "memory");
return v;
case 2:
- asm volatile ( "lock; xaddw %w0,%1"
- : "+r" (v), "+m" (*__xg(ptr))
+ asm volatile ( "lock; xadd %w[v], %[ptr]"
+ : [v] "+r" (v), [ptr] "+m" (*(uint16_t *)ptr)
:: "memory");
return v;
case 4:
- asm volatile ( "lock; xaddl %k0,%1"
- : "+r" (v), "+m" (*__xg(ptr))
+ asm volatile ( "lock; xadd %k[v], %[ptr]"
+ : [v] "+r" (v), [ptr] "+m" (*(uint32_t *)ptr)
:: "memory");
return v;
case 8:
- asm volatile ( "lock; xaddq %q0,%1"
- : "+r" (v), "+m" (*__xg(ptr))
+ asm volatile ( "lock; xadd %q[v], %[ptr]"
+ : [v] "+r" (v), [ptr] "+m" (*(uint64_t *)ptr)
:: "memory");
return v;
diff --git a/xen/include/asm-x86/x86_64/system.h b/xen/include/asm-x86/x86_64/system.h
index fae57ba..9c68f4f 100644
--- a/xen/include/asm-x86/x86_64/system.h
+++ b/xen/include/asm-x86/x86_64/system.h
@@ -25,7 +25,7 @@ static always_inline __uint128_t __cmpxchg16b(
/* Don't use "=A" here - clang can't deal with that. */
asm volatile ( "lock; cmpxchg16b %2"
- : "=d" (prev.hi), "=a" (prev.lo), "+m" (*__xg(ptr))
+ : "=d" (prev.hi), "=a" (prev.lo), "+m" (*(__uint128_t *)ptr)
: "c" (new.hi), "b" (new.lo), "0" (old.hi), "1" (old.lo) );
return prev.raw;
@@ -63,36 +63,38 @@ static always_inline __uint128_t cmpxchg16b_local_(
* If no fault occurs then _o is updated to the value we saw at _p. If this
* is the same as the initial value of _o then _n is written to location _p.
*/
-#define __cmpxchg_user(_p,_o,_n,_isuff,_oppre,_regtype) \
+#define __cmpxchg_user(_p, _o, _n, _oppre) \
stac(); \
asm volatile ( \
- "1: lock; cmpxchg"_isuff" %"_oppre"2,%3\n" \
+ "1: lock; cmpxchg %"_oppre"[new], %[ptr]\n" \
"2:\n" \
".section .fixup,\"ax\"\n" \
"3: movl $1,%1\n" \
" jmp 2b\n" \
".previous\n" \
_ASM_EXTABLE(1b, 3b) \
- : "=a" (_o), "=r" (_rc) \
- : _regtype (_n), "m" (*__xg((volatile void *)_p)), "0" (_o), "1" (0) \
+ : "+a" (_o), "=r" (_rc), \
+ [ptr] "+m" (*(volatile typeof(*(_p)) *)(_p)) \
+ : [new] "r" (_n), "1" (0) \
: "memory"); \
clac()
-#define cmpxchg_user(_p,_o,_n) \
+#define cmpxchg_user(_p, _o, _n) \
({ \
int _rc; \
- switch ( sizeof(*(_p)) ) { \
+ switch ( sizeof(*(_p)) ) \
+ { \
case 1: \
- __cmpxchg_user(_p,_o,_n,"b","b","q"); \
+ __cmpxchg_user(_p, _o, _n, "b"); \
break; \
case 2: \
- __cmpxchg_user(_p,_o,_n,"w","w","r"); \
+ __cmpxchg_user(_p, _o, _n, "w"); \
break; \
case 4: \
- __cmpxchg_user(_p,_o,_n,"l","k","r"); \
+ __cmpxchg_user(_p, _o, _n, "k"); \
break; \
case 8: \
- __cmpxchg_user(_p,_o,_n,"q","","r"); \
+ __cmpxchg_user(_p, _o, _n, "q"); \
break; \
} \
_rc; \
--
2.1.4
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxxxxxxxxx
https://lists.xenproject.org/mailman/listinfo/xen-devel