[Xen-devel] [PATCH 13/17] xen: arm64: asm: remove redundant "cc" clobbers
This resyncs atomics and cmpxchgs with Linux v3.14-rc7 by importing:
commit 95c4189689f92fba7ecf9097173404d4928c6e9b
Author: Will Deacon <will.deacon@xxxxxxx>
Date: Tue Feb 4 12:29:13 2014 +0000
arm64: asm: remove redundant "cc" clobbers
cbnz/tbnz don't update the condition flags, so remove the "cc" clobbers
from inline asm blocks that only use these instructions to implement
conditional branches.
Signed-off-by: Will Deacon <will.deacon@xxxxxxx>
Signed-off-by: Catalin Marinas <catalin.marinas@xxxxxxx>
Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
---
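[Note, not part of the commit: the two standalone sketches below
illustrate the rule the imported change relies on. cbnz tests a
general-purpose register and branches without writing the NZCV
condition flags, so a retry loop built only from ldxr/stxr/cbnz can
safely drop the "cc" clobber; a sequence containing cmp does write
NZCV and must keep "cc", which is why the atomic_cmpxchg hunk below
retains it (there the "memory" clobber goes instead, with ordering
provided by the explicit smp_mb() visible in the context). Function
names in the sketches are made up for illustration; barriers and
ordering are out of scope here.]

/* Hypothetical sketch, not from this patch.  Build with e.g.
 *   aarch64-linux-gnu-gcc -O2 -c cc_clobber_sketch.c
 */

/* ldxr/stxr retry loop branching only via cbnz: NZCV is never
 * written, so no "cc" clobber is declared. */
static inline void sketch_atomic_add(int i, volatile int *counter)
{
    unsigned long tmp;
    int result;

    asm volatile("// sketch_atomic_add\n"
    "1: ldxr    %w0, %2\n"          /* load-exclusive                   */
    "   add     %w0, %w0, %w3\n"    /* add, not adds: flags untouched   */
    "   stxr    %w1, %w0, %2\n"     /* %w1 == 0 iff the store succeeded */
    "   cbnz    %w1, 1b"            /* retry; cbnz leaves NZCV alone    */
    : "=&r" (result), "=&r" (tmp), "+Q" (*counter)
    : "Ir" (i));                    /* no "cc" clobber needed           */
}

/* The same loop with a cmp in it: cmp writes NZCV, so "cc" must stay
 * in the clobber list, exactly as in the atomic_cmpxchg hunk below. */
static inline int sketch_cmpxchg(volatile int *counter, int old, int new_)
{
    unsigned long tmp;
    int oldval;

    asm volatile("// sketch_cmpxchg\n"
    "1: ldxr    %w1, %2\n"
    "   cmp     %w1, %w3\n"         /* cmp sets the condition flags     */
    "   b.ne    2f\n"
    "   stxr    %w0, %w4, %2\n"
    "   cbnz    %w0, 1b\n"
    "2:"
    : "=&r" (tmp), "=&r" (oldval), "+Q" (*counter)
    : "Ir" (old), "r" (new_)
    : "cc");                        /* cmp clobbers NZCV                */
    return oldval;
}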
xen/include/asm-arm/arm64/atomic.h | 12 +++++-------
xen/include/asm-arm/arm64/spinlock.h | 6 +++---
xen/include/asm-arm/arm64/system.h | 8 ++++----
3 files changed, 12 insertions(+), 14 deletions(-)
diff --git a/xen/include/asm-arm/arm64/atomic.h b/xen/include/asm-arm/arm64/atomic.h
index 3f37ed5..b5d50f2 100644
--- a/xen/include/asm-arm/arm64/atomic.h
+++ b/xen/include/asm-arm/arm64/atomic.h
@@ -38,8 +38,7 @@ static inline void atomic_add(int i, atomic_t *v)
" stxr %w1, %w0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
- : "Ir" (i)
- : "cc");
+ : "Ir" (i));
}
static inline int atomic_add_return(int i, atomic_t *v)
@@ -54,7 +53,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "Ir" (i)
- : "cc", "memory");
+ : "memory");
smp_mb();
return result;
@@ -71,8 +70,7 @@ static inline void atomic_sub(int i, atomic_t *v)
" stxr %w1, %w0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
- : "Ir" (i)
- : "cc");
+ : "Ir" (i));
}
static inline int atomic_sub_return(int i, atomic_t *v)
@@ -87,7 +85,7 @@ static inline int atomic_sub_return(int i, atomic_t *v)
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "Ir" (i)
- : "cc", "memory");
+ : "memory");
smp_mb();
return result;
@@ -109,7 +107,7 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
"2:"
: "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
: "Ir" (old), "r" (new)
- : "cc", "memory");
+ : "cc");
smp_mb();
return oldval;
diff --git a/xen/include/asm-arm/arm64/spinlock.h b/xen/include/asm-arm/arm64/spinlock.h
index 3a36cfd..04300bc 100644
--- a/xen/include/asm-arm/arm64/spinlock.h
+++ b/xen/include/asm-arm/arm64/spinlock.h
@@ -70,7 +70,7 @@ static always_inline int _raw_read_trylock(raw_rwlock_t *rw)
"1:\n"
: "=&r" (tmp), "+r" (tmp2), "+Q" (rw->lock)
:
- : "cc", "memory");
+ : "memory");
return !tmp2;
}
@@ -86,7 +86,7 @@ static always_inline int _raw_write_trylock(raw_rwlock_t *rw)
"1:\n"
: "=&r" (tmp), "+Q" (rw->lock)
: "r" (0x80000000)
- : "cc", "memory");
+ : "memory");
return !tmp;
}
@@ -102,7 +102,7 @@ static inline void _raw_read_unlock(raw_rwlock_t *rw)
" cbnz %w1, 1b\n"
: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
:
- : "cc", "memory");
+ : "memory");
}
static inline void _raw_write_unlock(raw_rwlock_t *rw)
diff --git a/xen/include/asm-arm/arm64/system.h b/xen/include/asm-arm/arm64/system.h
index 9fa698b..fa50ead 100644
--- a/xen/include/asm-arm/arm64/system.h
+++ b/xen/include/asm-arm/arm64/system.h
@@ -16,7 +16,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
" cbnz %w1, 1b\n"
: "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)
: "r" (x)
- : "cc", "memory");
+ : "memory");
break;
case 2:
asm volatile("// __xchg2\n"
@@ -25,7 +25,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
" cbnz %w1, 1b\n"
: "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr)
: "r" (x)
- : "cc", "memory");
+ : "memory");
break;
case 4:
asm volatile("// __xchg4\n"
@@ -34,7 +34,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
" cbnz %w1, 1b\n"
: "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr)
: "r" (x)
- : "cc", "memory");
+ : "memory");
break;
case 8:
asm volatile("// __xchg8\n"
@@ -43,7 +43,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
" cbnz %w1, 1b\n"
: "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr)
: "r" (x)
- : "cc", "memory");
+ : "memory");
break;
default:
__bad_xchg(ptr, size), ret = 0;
--
1.7.10.4