[RFC PATCH v2 09/15] xen/arm64: port Linux's arm64 atomic_lse.h to Xen
From: Ash Wilding <ash.j.wilding@xxxxxxxxx>
As with the LL/SC atomics helpers, most of the "work" here is simply
deleting the atomic64_t helper definitions, as we don't have an
atomic64_t type in Xen.
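For context, Xen's arm atomic.h only provides a 32-bit counter type,
along the lines of the sketch below (the exact definition lives in
xen/include/asm-arm/atomic.h):

    /* Xen's atomic_t carries a 32-bit counter only; there is no
     * atomic64_t counterpart, hence the deletions below. */
    typedef struct { int counter; } atomic_t;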
We also need to s/__always_inline/always_inline/ to match the
qualifier name used by Xen.
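Both qualifiers expand to the same always-inline attribute; a minimal
sketch, assuming the usual definitions in Linux's compiler_types.h and
Xen's xen/compiler.h:

    /* Linux spells the qualifier __always_inline ... */
    #define __always_inline inline __attribute__((__always_inline__))

    /* ... whereas Xen spells it always_inline, hence the rename. */
    #define always_inline __inline__ __attribute__((__always_inline__))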
Signed-off-by: Ash Wilding <ash.j.wilding@xxxxxxxxx>
---
xen/include/asm-arm/arm64/atomic_lse.h | 189 ++-----------------------
1 file changed, 8 insertions(+), 181 deletions(-)
diff --git a/xen/include/asm-arm/arm64/atomic_lse.h b/xen/include/asm-arm/arm64/atomic_lse.h
index b3b0d43a7d..81613f7250 100644
--- a/xen/include/asm-arm/arm64/atomic_lse.h
+++ b/xen/include/asm-arm/arm64/atomic_lse.h
@@ -1,14 +1,15 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
+
/*
- * Based on arch/arm/include/asm/atomic.h
+ * Taken from Linux 5.10-rc2 (last commit 3cea11cd5)
*
* Copyright (C) 1996 Russell King.
* Copyright (C) 2002 Deep Blue Solutions Ltd.
* Copyright (C) 2012 ARM Ltd.
+ * SPDX-License-Identifier: GPL-2.0-only
*/
-#ifndef __ASM_ATOMIC_LSE_H
-#define __ASM_ATOMIC_LSE_H
+#ifndef __ASM_ARM_ARM64_ATOMIC_LSE_H
+#define __ASM_ARM_ARM64_ATOMIC_LSE_H
#define ATOMIC_OP(op, asm_op) \
static inline void __lse_atomic_##op(int i, atomic_t *v) \
@@ -163,182 +164,8 @@ ATOMIC_FETCH_OP_SUB( , al, "memory")
#undef ATOMIC_FETCH_OP_SUB
-#define ATOMIC64_OP(op, asm_op) \
-static inline void __lse_atomic64_##op(s64 i, atomic64_t *v) \
-{ \
- asm volatile( \
- __LSE_PREAMBLE \
-" " #asm_op " %[i], %[v]\n" \
- : [i] "+r" (i), [v] "+Q" (v->counter) \
- : "r" (v)); \
-}
-
-ATOMIC64_OP(andnot, stclr)
-ATOMIC64_OP(or, stset)
-ATOMIC64_OP(xor, steor)
-ATOMIC64_OP(add, stadd)
-
-#undef ATOMIC64_OP
-
-#define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...) \
-static inline long __lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)\
-{ \
- asm volatile( \
- __LSE_PREAMBLE \
-" " #asm_op #mb " %[i], %[i], %[v]" \
- : [i] "+r" (i), [v] "+Q" (v->counter) \
- : "r" (v) \
- : cl); \
- \
- return i; \
-}
-
-#define ATOMIC64_FETCH_OPS(op, asm_op) \
- ATOMIC64_FETCH_OP(_relaxed, , op, asm_op) \
- ATOMIC64_FETCH_OP(_acquire, a, op, asm_op, "memory") \
- ATOMIC64_FETCH_OP(_release, l, op, asm_op, "memory") \
- ATOMIC64_FETCH_OP( , al, op, asm_op, "memory")
-
-ATOMIC64_FETCH_OPS(andnot, ldclr)
-ATOMIC64_FETCH_OPS(or, ldset)
-ATOMIC64_FETCH_OPS(xor, ldeor)
-ATOMIC64_FETCH_OPS(add, ldadd)
-
-#undef ATOMIC64_FETCH_OP
-#undef ATOMIC64_FETCH_OPS
-
-#define ATOMIC64_OP_ADD_RETURN(name, mb, cl...) \
-static inline long __lse_atomic64_add_return##name(s64 i, atomic64_t *v)\
-{ \
- unsigned long tmp; \
- \
- asm volatile( \
- __LSE_PREAMBLE \
- " ldadd" #mb " %[i], %x[tmp], %[v]\n" \
- " add %[i], %[i], %x[tmp]" \
- : [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \
- : "r" (v) \
- : cl); \
- \
- return i; \
-}
-
-ATOMIC64_OP_ADD_RETURN(_relaxed, )
-ATOMIC64_OP_ADD_RETURN(_acquire, a, "memory")
-ATOMIC64_OP_ADD_RETURN(_release, l, "memory")
-ATOMIC64_OP_ADD_RETURN( , al, "memory")
-
-#undef ATOMIC64_OP_ADD_RETURN
-
-static inline void __lse_atomic64_and(s64 i, atomic64_t *v)
-{
- asm volatile(
- __LSE_PREAMBLE
- " mvn %[i], %[i]\n"
- " stclr %[i], %[v]"
- : [i] "+&r" (i), [v] "+Q" (v->counter)
- : "r" (v));
-}
-
-#define ATOMIC64_FETCH_OP_AND(name, mb, cl...) \
-static inline long __lse_atomic64_fetch_and##name(s64 i, atomic64_t *v) \
-{ \
- asm volatile( \
- __LSE_PREAMBLE \
- " mvn %[i], %[i]\n" \
- " ldclr" #mb " %[i], %[i], %[v]" \
- : [i] "+&r" (i), [v] "+Q" (v->counter) \
- : "r" (v) \
- : cl); \
- \
- return i; \
-}
-
-ATOMIC64_FETCH_OP_AND(_relaxed, )
-ATOMIC64_FETCH_OP_AND(_acquire, a, "memory")
-ATOMIC64_FETCH_OP_AND(_release, l, "memory")
-ATOMIC64_FETCH_OP_AND( , al, "memory")
-
-#undef ATOMIC64_FETCH_OP_AND
-
-static inline void __lse_atomic64_sub(s64 i, atomic64_t *v)
-{
- asm volatile(
- __LSE_PREAMBLE
- " neg %[i], %[i]\n"
- " stadd %[i], %[v]"
- : [i] "+&r" (i), [v] "+Q" (v->counter)
- : "r" (v));
-}
-
-#define ATOMIC64_OP_SUB_RETURN(name, mb, cl...) \
-static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v) \
-{ \
- unsigned long tmp; \
- \
- asm volatile( \
- __LSE_PREAMBLE \
- " neg %[i], %[i]\n" \
- " ldadd" #mb " %[i], %x[tmp], %[v]\n" \
- " add %[i], %[i], %x[tmp]" \
- : [i] "+&r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \
- : "r" (v) \
- : cl); \
- \
- return i; \
-}
-
-ATOMIC64_OP_SUB_RETURN(_relaxed, )
-ATOMIC64_OP_SUB_RETURN(_acquire, a, "memory")
-ATOMIC64_OP_SUB_RETURN(_release, l, "memory")
-ATOMIC64_OP_SUB_RETURN( , al, "memory")
-
-#undef ATOMIC64_OP_SUB_RETURN
-
-#define ATOMIC64_FETCH_OP_SUB(name, mb, cl...) \
-static inline long __lse_atomic64_fetch_sub##name(s64 i, atomic64_t *v) \
-{ \
- asm volatile( \
- __LSE_PREAMBLE \
- " neg %[i], %[i]\n" \
- " ldadd" #mb " %[i], %[i], %[v]" \
- : [i] "+&r" (i), [v] "+Q" (v->counter) \
- : "r" (v) \
- : cl); \
- \
- return i; \
-}
-
-ATOMIC64_FETCH_OP_SUB(_relaxed, )
-ATOMIC64_FETCH_OP_SUB(_acquire, a, "memory")
-ATOMIC64_FETCH_OP_SUB(_release, l, "memory")
-ATOMIC64_FETCH_OP_SUB( , al, "memory")
-
-#undef ATOMIC64_FETCH_OP_SUB
-
-static inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v)
-{
- unsigned long tmp;
-
- asm volatile(
- __LSE_PREAMBLE
- "1: ldr %x[tmp], %[v]\n"
- " subs %[ret], %x[tmp], #1\n"
- " b.lt 2f\n"
- " casal %x[tmp], %[ret], %[v]\n"
- " sub %x[tmp], %x[tmp], #1\n"
- " sub %x[tmp], %x[tmp], %[ret]\n"
- " cbnz %x[tmp], 1b\n"
- "2:"
- : [ret] "+&r" (v), [v] "+Q" (v->counter), [tmp] "=&r" (tmp)
- :
- : "cc", "memory");
-
- return (long)v;
-}
-
#define __CMPXCHG_CASE(w, sfx, name, sz, mb, cl...) \
-static __always_inline u##sz \
+static always_inline u##sz \
__lse__cmpxchg_case_##name##sz(volatile void *ptr, \
u##sz old, \
u##sz new) \
@@ -381,7 +208,7 @@ __CMPXCHG_CASE(x, , mb_, 64, al, "memory")
#undef __CMPXCHG_CASE
#define __CMPXCHG_DBL(name, mb, cl...) \
-static __always_inline long \
+static always_inline long \
__lse__cmpxchg_double##name(unsigned long old1, \
unsigned long old2, \
unsigned long new1, \
@@ -416,4 +243,4 @@ __CMPXCHG_DBL(_mb, al, "memory")
#undef __CMPXCHG_DBL
-#endif /* __ASM_ATOMIC_LSE_H */
\ No newline at end of file
+#endif /* __ASM_ARM_ARM64_ATOMIC_LSE_H */
\ No newline at end of file
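
For reviewers: a helper generated by the ATOMIC_OP macro kept above
expands roughly as below, e.g. for ATOMIC_OP(add, stadd); this is a
sketch of the post-patch result, not new code in the patch itself:

    /* Approximate expansion of ATOMIC_OP(add, stadd): a single LSE
     * STADD instruction performs the read-modify-write atomically,
     * with no LL/SC retry loop. */
    static inline void __lse_atomic_add(int i, atomic_t *v)
    {
        asm volatile(
        __LSE_PREAMBLE
    "   stadd   %w[i], %[v]\n"
        : [i] "+r" (i), [v] "+Q" (v->counter)
        : "r" (v));
    }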
--
2.24.3 (Apple Git-128)