
[Xen-devel] [PATCH 09/45] xen: arm64: atomics
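
Split the ARM atomic operations out of the common asm-arm/atomic.h into
per-subarchitecture headers: the existing 32-bit ldrex/strex
implementation moves to asm-arm/arm32/atomic.h, and a new
asm-arm/arm64/atomic.h provides an AArch64 implementation based on
Linux's arch/arm64/include/asm/atomic.h (ldxr/stxr, with acquire/release
ldaxr/stlxr for the *_return and cmpxchg variants). The generic
read/write accessor macros in asm-arm/atomic.h gain a register-width
parameter so that the same definitions serve both subarchitectures.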



Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
---
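
A note on the structure of the assembly below: the arm32 ldrex/strex and
the arm64 ldxr/stxr sequences are both the usual load-exclusive /
store-exclusive retry loop. As a rough C-level sketch (illustration only:
the helper name atomic_add_sketch and the GCC __atomic built-in are
stand-ins for the hand-written exclusives the patch actually emits):

    /* Roughly what atomic_add() does: retry the read-modify-write until
     * the store-conditional (modelled here by a weak CAS) succeeds.
     * No barriers, matching the relaxed atomic_add()/atomic_sub(). */
    static inline void atomic_add_sketch(int i, atomic_t *v)
    {
        int old;
        do {
            old = v->counter;                      /* ldrex / ldxr */
        } while (!__atomic_compare_exchange_n(&v->counter, &old, old + i,
                                              1 /* weak */,
                                              __ATOMIC_RELAXED,
                                              __ATOMIC_RELAXED));
                                                   /* strex+bne / stxr+cbnz */
    }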
 xen/include/asm-arm/arm32/atomic.h |  151 ++++++++++++++++++
 xen/include/asm-arm/arm64/atomic.h |  300 ++++++++++++++++++++++++++++++++++++
 xen/include/asm-arm/atomic.h       |  186 ++++------------------
 3 files changed, 484 insertions(+), 153 deletions(-)
 create mode 100644 xen/include/asm-arm/arm32/atomic.h
 create mode 100644 xen/include/asm-arm/arm64/atomic.h
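
For context, a minimal example of how the interface added below is
typically consumed (the struct and function names here are hypothetical
and not part of this patch):

    #include <asm/atomic.h>

    /* A reference-counted object built on atomic_t. */
    struct demo_obj {
        atomic_t refcnt;
    };

    static void demo_obj_init(struct demo_obj *obj)
    {
        atomic_set(&obj->refcnt, 1);       /* plain (non-RMW) store */
    }

    static void demo_obj_get(struct demo_obj *obj)
    {
        atomic_inc(&obj->refcnt);          /* atomic_add(1, ...) */
    }

    /* Returns non-zero once the final reference has been dropped. */
    static int demo_obj_put(struct demo_obj *obj)
    {
        return atomic_dec_and_test(&obj->refcnt);
    }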

diff --git a/xen/include/asm-arm/arm32/atomic.h b/xen/include/asm-arm/arm32/atomic.h
new file mode 100644
index 0000000..4ee6626
--- /dev/null
+++ b/xen/include/asm-arm/arm32/atomic.h
@@ -0,0 +1,151 @@
+/*
+ *  arch/arm/include/asm/atomic.h
+ *
+ *  Copyright (C) 1996 Russell King.
+ *  Copyright (C) 2002 Deep Blue Solutions Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __ARCH_ARM_ARM32_ATOMIC__
+#define __ARCH_ARM_ARM32_ATOMIC__
+
+/*
+ * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
+ * store exclusive to ensure that these are atomic.  We may loop
+ * to ensure that the update happens.
+ */
+static inline void atomic_add(int i, atomic_t *v)
+{
+        unsigned long tmp;
+        int result;
+
+        __asm__ __volatile__("@ atomic_add\n"
+"1:     ldrex   %0, [%3]\n"
+"       add     %0, %0, %4\n"
+"       strex   %1, %0, [%3]\n"
+"       teq     %1, #0\n"
+"       bne     1b"
+        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+        : "r" (&v->counter), "Ir" (i)
+        : "cc");
+}
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+        unsigned long tmp;
+        int result;
+
+        smp_mb();
+
+        __asm__ __volatile__("@ atomic_add_return\n"
+"1:     ldrex   %0, [%3]\n"
+"       add     %0, %0, %4\n"
+"       strex   %1, %0, [%3]\n"
+"       teq     %1, #0\n"
+"       bne     1b"
+        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+        : "r" (&v->counter), "Ir" (i)
+        : "cc");
+
+        smp_mb();
+
+        return result;
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+        unsigned long tmp;
+        int result;
+
+        __asm__ __volatile__("@ atomic_sub\n"
+"1:     ldrex   %0, [%3]\n"
+"       sub     %0, %0, %4\n"
+"       strex   %1, %0, [%3]\n"
+"       teq     %1, #0\n"
+"       bne     1b"
+        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+        : "r" (&v->counter), "Ir" (i)
+        : "cc");
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+        unsigned long tmp;
+        int result;
+
+        smp_mb();
+
+        __asm__ __volatile__("@ atomic_sub_return\n"
+"1:     ldrex   %0, [%3]\n"
+"       sub     %0, %0, %4\n"
+"       strex   %1, %0, [%3]\n"
+"       teq     %1, #0\n"
+"       bne     1b"
+        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+        : "r" (&v->counter), "Ir" (i)
+        : "cc");
+
+        smp_mb();
+
+        return result;
+}
+
+static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
+{
+        unsigned long oldval, res;
+
+        smp_mb();
+
+        do {
+                __asm__ __volatile__("@ atomic_cmpxchg\n"
+                "ldrex  %1, [%3]\n"
+                "mov    %0, #0\n"
+                "teq    %1, %4\n"
+                "strexeq %0, %5, [%3]\n"
+                    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
+                    : "r" (&ptr->counter), "Ir" (old), "r" (new)
+                    : "cc");
+        } while (res);
+
+        smp_mb();
+
+        return oldval;
+}
+
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+{
+        unsigned long tmp, tmp2;
+
+        __asm__ __volatile__("@ atomic_clear_mask\n"
+"1:     ldrex   %0, [%3]\n"
+"       bic     %0, %0, %4\n"
+"       strex   %1, %0, [%3]\n"
+"       teq     %1, #0\n"
+"       bne     1b"
+        : "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
+        : "r" (addr), "Ir" (mask)
+        : "cc");
+}
+
+#define atomic_inc(v)           atomic_add(1, v)
+#define atomic_dec(v)           atomic_sub(1, v)
+
+#define atomic_inc_and_test(v)  (atomic_add_return(1, v) == 0)
+#define atomic_dec_and_test(v)  (atomic_sub_return(1, v) == 0)
+#define atomic_inc_return(v)    (atomic_add_return(1, v))
+#define atomic_dec_return(v)    (atomic_sub_return(1, v))
+#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
+
+#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
+
+#endif /* __ARCH_ARM_ARM32_ATOMIC__ */
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-arm/arm64/atomic.h b/xen/include/asm-arm/arm64/atomic.h
new file mode 100644
index 0000000..1997e18
--- /dev/null
+++ b/xen/include/asm-arm/arm64/atomic.h
@@ -0,0 +1,300 @@
+/*
+ * Based on arch/arm64/include/asm/atomic.h
+ * which in turn is
+ * Based on arch/arm/include/asm/atomic.h
+ *
+ * Copyright (C) 1996 Russell King.
+ * Copyright (C) 2002 Deep Blue Solutions Ltd.
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ARCH_ARM_ARM64_ATOMIC
+#define __ARCH_ARM_ARM64_ATOMIC
+
+/*
+ * AArch64 UP and SMP safe atomic ops.  We use load exclusive and
+ * store exclusive to ensure that these are atomic.  We may loop
+ * to ensure that the update happens.
+ */
+static inline void atomic_add(int i, atomic_t *v)
+{
+       unsigned long tmp;
+       int result;
+
+       asm volatile("// atomic_add\n"
+"1:    ldxr    %w0, [%3]\n"
+"      add     %w0, %w0, %w4\n"
+"      stxr    %w1, %w0, [%3]\n"
+"      cbnz    %w1, 1b"
+       : "=&r" (result), "=&r" (tmp), "+o" (v->counter)
+       : "r" (&v->counter), "Ir" (i)
+       : "cc");
+}
+
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+       unsigned long tmp;
+       int result;
+
+       asm volatile("// atomic_add_return\n"
+"1:    ldaxr   %w0, [%3]\n"
+"      add     %w0, %w0, %w4\n"
+"      stlxr   %w1, %w0, [%3]\n"
+"      cbnz    %w1, 1b"
+       : "=&r" (result), "=&r" (tmp), "+o" (v->counter)
+       : "r" (&v->counter), "Ir" (i)
+       : "cc");
+
+       return result;
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+       unsigned long tmp;
+       int result;
+
+       asm volatile("// atomic_sub\n"
+"1:    ldxr    %w0, [%3]\n"
+"      sub     %w0, %w0, %w4\n"
+"      stxr    %w1, %w0, [%3]\n"
+"      cbnz    %w1, 1b"
+       : "=&r" (result), "=&r" (tmp), "+o" (v->counter)
+       : "r" (&v->counter), "Ir" (i)
+       : "cc");
+}
+
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+       unsigned long tmp;
+       int result;
+
+       asm volatile("// atomic_sub_return\n"
+"1:    ldaxr   %w0, [%3]\n"
+"      sub     %w0, %w0, %w4\n"
+"      stlxr   %w1, %w0, [%3]\n"
+"      cbnz    %w1, 1b"
+       : "=&r" (result), "=&r" (tmp), "+o" (v->counter)
+       : "r" (&v->counter), "Ir" (i)
+       : "cc");
+
+       return result;
+}
+
+static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
+{
+       unsigned long tmp;
+       int oldval;
+
+       asm volatile("// atomic_cmpxchg\n"
+"1:    ldaxr   %w1, [%3]\n"
+"      cmp     %w1, %w4\n"
+"      b.ne    2f\n"
+"      stlxr   %w0, %w5, [%3]\n"
+"      cbnz    %w0, 1b\n"
+"2:"
+       : "=&r" (tmp), "=&r" (oldval), "+o" (ptr->counter)
+       : "r" (&ptr->counter), "Ir" (old), "r" (new)
+       : "cc");
+
+       return oldval;
+}
+
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+{
+       unsigned long tmp, tmp2;
+
+       asm volatile("// atomic_clear_mask\n"
+"1:    ldxr    %0, [%3]\n"
+"      bic     %0, %0, %4\n"
+"      stxr    %w1, %0, [%3]\n"
+"      cbnz    %w1, 1b"
+       : "=&r" (tmp), "=&r" (tmp2), "+o" (*addr)
+       : "r" (addr), "Ir" (mask)
+       : "cc");
+}
+
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
+static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+{
+       int c, old;
+
+       c = atomic_read(v);
+       while (c != u && (old = atomic_cmpxchg((v), c, c + a)) != c)
+               c = old;
+       return c;
+}
+
+#define atomic_inc(v)          atomic_add(1, v)
+#define atomic_dec(v)          atomic_sub(1, v)
+
+#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
+#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
+#define atomic_inc_return(v)    (atomic_add_return(1, v))
+#define atomic_dec_return(v)    (atomic_sub_return(1, v))
+#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
+
+#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
+
+#define smp_mb__before_atomic_dec()    smp_mb()
+#define smp_mb__after_atomic_dec()     smp_mb()
+#define smp_mb__before_atomic_inc()    smp_mb()
+#define smp_mb__after_atomic_inc()     smp_mb()
+
+#if 0 /* Currently unused in Xen */
+/*
+ * 64-bit atomic operations.
+ */
+
+#define ATOMIC64_INIT(i) { (i) }
+
+#define atomic64_read(v)       (*(volatile long long *)&(v)->counter)
+#define atomic64_set(v,i)      (((v)->counter) = (i))
+
+static inline void atomic64_add(u64 i, atomic64_t *v)
+{
+       long result;
+       unsigned long tmp;
+
+       asm volatile("// atomic64_add\n"
+"1:    ldxr    %0, [%3]\n"
+"      add     %0, %0, %4\n"
+"      stxr    %w1, %0, [%3]\n"
+"      cbnz    %w1, 1b"
+       : "=&r" (result), "=&r" (tmp), "+o" (v->counter)
+       : "r" (&v->counter), "Ir" (i)
+       : "cc");
+}
+
+static inline long atomic64_add_return(long i, atomic64_t *v)
+{
+       long result;
+       unsigned long tmp;
+
+       asm volatile("// atomic64_add_return\n"
+"1:    ldaxr   %0, [%3]\n"
+"      add     %0, %0, %4\n"
+"      stlxr   %w1, %0, [%3]\n"
+"      cbnz    %w1, 1b"
+       : "=&r" (result), "=&r" (tmp), "+o" (v->counter)
+       : "r" (&v->counter), "Ir" (i)
+       : "cc");
+
+       return result;
+}
+
+static inline void atomic64_sub(u64 i, atomic64_t *v)
+{
+       long result;
+       unsigned long tmp;
+
+       asm volatile("// atomic64_sub\n"
+"1:    ldxr    %0, [%3]\n"
+"      sub     %0, %0, %4\n"
+"      stxr    %w1, %0, [%3]\n"
+"      cbnz    %w1, 1b"
+       : "=&r" (result), "=&r" (tmp), "+o" (v->counter)
+       : "r" (&v->counter), "Ir" (i)
+       : "cc");
+}
+
+static inline long atomic64_sub_return(long i, atomic64_t *v)
+{
+       long result;
+       unsigned long tmp;
+
+       asm volatile("// atomic64_sub_return\n"
+"1:    ldaxr   %0, [%3]\n"
+"      sub     %0, %0, %4\n"
+"      stlxr   %w1, %0, [%3]\n"
+"      cbnz    %w1, 1b"
+       : "=&r" (result), "=&r" (tmp), "+o" (v->counter)
+       : "r" (&v->counter), "Ir" (i)
+       : "cc");
+
+       return result;
+}
+
+static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
+{
+       long oldval;
+       unsigned long res;
+
+       asm volatile("// atomic64_cmpxchg\n"
+"1:    ldaxr   %1, [%3]\n"
+"      cmp     %1, %4\n"
+"      b.ne    2f\n"
+"      stlxr   %w0, %5, [%3]\n"
+"      cbnz    %w0, 1b\n"
+"2:"
+       : "=&r" (res), "=&r" (oldval), "+o" (ptr->counter)
+       : "r" (&ptr->counter), "Ir" (old), "r" (new)
+       : "cc");
+
+       return oldval;
+}
+
+#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+
+static inline long atomic64_dec_if_positive(atomic64_t *v)
+{
+       long result;
+       unsigned long tmp;
+
+       asm volatile("// atomic64_dec_if_positive\n"
+"1:    ldaxr   %0, [%3]\n"
+"      subs    %0, %0, #1\n"
+"      b.mi    2f\n"
+"      stlxr   %w1, %0, [%3]\n"
+"      cbnz    %w1, 1b\n"
+"2:"
+       : "=&r" (result), "=&r" (tmp), "+o" (v->counter)
+       : "r" (&v->counter)
+       : "cc");
+
+       return result;
+}
+
+static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+       long c, old;
+
+       c = atomic64_read(v);
+       while (c != u && (old = atomic64_cmpxchg((v), c, c + a)) != c)
+               c = old;
+
+       return c != u;
+}
+
+#define atomic64_add_negative(a, v)    (atomic64_add_return((a), (v)) < 0)
+#define atomic64_inc(v)                        atomic64_add(1LL, (v))
+#define atomic64_inc_return(v)         atomic64_add_return(1LL, (v))
+#define atomic64_inc_and_test(v)       (atomic64_inc_return(v) == 0)
+#define atomic64_sub_and_test(a, v)    (atomic64_sub_return((a), (v)) == 0)
+#define atomic64_dec(v)                        atomic64_sub(1LL, (v))
+#define atomic64_dec_return(v)         atomic64_sub_return(1LL, (v))
+#define atomic64_dec_and_test(v)       (atomic64_dec_return((v)) == 0)
+#define atomic64_inc_not_zero(v)       atomic64_add_unless((v), 1LL, 0LL)
+
+#endif /* 0 */
+
+#endif
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-arm/atomic.h b/xen/include/asm-arm/atomic.h
index c7eadd6..b37b2d0 100644
--- a/xen/include/asm-arm/atomic.h
+++ b/xen/include/asm-arm/atomic.h
@@ -1,48 +1,49 @@
-/*
- *  arch/arm/include/asm/atomic.h
- *
- *  Copyright (C) 1996 Russell King.
- *  Copyright (C) 2002 Deep Blue Solutions Ltd.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
 #ifndef __ARCH_ARM_ATOMIC__
 #define __ARCH_ARM_ATOMIC__
 
 #include <xen/config.h>
 #include <asm/system.h>
 
-#define build_atomic_read(name, size, type, reg)   \
+#define build_atomic_read(name, size, width, type, reg)\
 static inline type name(const volatile type *addr) \
 {                                                  \
     type ret;                                      \
-    asm volatile("ldr" size " %0,%1"               \
+    asm volatile("ldr" size " %" width "0,%1"      \
                  : reg (ret)                       \
                  : "m" (*(volatile type *)addr));  \
     return ret;                                    \
 }
 
-#define build_atomic_write(name, size, type, reg)      \
+#define build_atomic_write(name, size, width, type, reg) \
 static inline void name(volatile type *addr, type val) \
 {                                                      \
-    asm volatile("str" size " %1,%0"                   \
+    asm volatile("str" size " %"width"1,%0"            \
                  : "=m" (*(volatile type *)addr)       \
                  : reg (val));                         \
 }
 
-build_atomic_read(read_u8_atomic, "b", uint8_t, "=q")
-build_atomic_read(read_u16_atomic, "h", uint16_t, "=r")
-build_atomic_read(read_u32_atomic, "", uint32_t, "=r")
-//build_atomic_read(read_u64_atomic, "d", uint64_t, "=r")
-build_atomic_read(read_int_atomic, "", int, "=r")
-
-build_atomic_write(write_u8_atomic, "b", uint8_t, "q")
-build_atomic_write(write_u16_atomic, "h", uint16_t, "r")
-build_atomic_write(write_u32_atomic, "", uint32_t, "r")
-//build_atomic_write(write_u64_atomic, "d", uint64_t, "r")
-build_atomic_write(write_int_atomic, "", int, "r")
+#if defined (CONFIG_ARM_32)
+#define BYTE ""
+#define WORD ""
+#elif defined (CONFIG_ARM_64)
+#define BYTE "w"
+#define WORD "w"
+#endif
+
+build_atomic_read(read_u8_atomic,  "b", BYTE, uint8_t, "=r")
+build_atomic_read(read_u16_atomic, "h", WORD, uint16_t, "=r")
+build_atomic_read(read_u32_atomic, "",  WORD, uint32_t, "=r")
+build_atomic_read(read_int_atomic, "",  WORD, int, "=r")
+
+build_atomic_write(write_u8_atomic,  "b", BYTE, uint8_t, "r")
+build_atomic_write(write_u16_atomic, "h", WORD, uint16_t, "r")
+build_atomic_write(write_u32_atomic, "",  WORD, uint32_t, "r")
+build_atomic_write(write_int_atomic, "",  WORD, int, "r")
+
+#if 0 /* defined (CONFIG_ARM_64) */
+build_atomic_read(read_u64_atomic, "", "x", uint64_t, "=r")
+build_atomic_write(write_u64_atomic, "", "x", uint64_t, "r")
+#endif
 
 void __bad_atomic_size(void);
 
@@ -88,134 +89,13 @@ typedef struct { int counter; } atomic_t;
 #define _atomic_set(v,i) (((v).counter) = (i))
 #define atomic_set(v,i) (((v)->counter) = (i))
 
-/*
- * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
- * store exclusive to ensure that these are atomic.  We may loop
- * to ensure that the update happens.
- */
-static inline void atomic_add(int i, atomic_t *v)
-{
-        unsigned long tmp;
-        int result;
-
-        __asm__ __volatile__("@ atomic_add\n"
-"1:     ldrex   %0, [%3]\n"
-"       add     %0, %0, %4\n"
-"       strex   %1, %0, [%3]\n"
-"       teq     %1, #0\n"
-"       bne     1b"
-        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-        : "r" (&v->counter), "Ir" (i)
-        : "cc");
-}
-
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-        unsigned long tmp;
-        int result;
-
-        smp_mb();
-
-        __asm__ __volatile__("@ atomic_add_return\n"
-"1:     ldrex   %0, [%3]\n"
-"       add     %0, %0, %4\n"
-"       strex   %1, %0, [%3]\n"
-"       teq     %1, #0\n"
-"       bne     1b"
-        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-        : "r" (&v->counter), "Ir" (i)
-        : "cc");
-
-        smp_mb();
-
-        return result;
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
-        unsigned long tmp;
-        int result;
-
-        __asm__ __volatile__("@ atomic_sub\n"
-"1:     ldrex   %0, [%3]\n"
-"       sub     %0, %0, %4\n"
-"       strex   %1, %0, [%3]\n"
-"       teq     %1, #0\n"
-"       bne     1b"
-        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-        : "r" (&v->counter), "Ir" (i)
-        : "cc");
-}
-
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-        unsigned long tmp;
-        int result;
-
-        smp_mb();
-
-        __asm__ __volatile__("@ atomic_sub_return\n"
-"1:     ldrex   %0, [%3]\n"
-"       sub     %0, %0, %4\n"
-"       strex   %1, %0, [%3]\n"
-"       teq     %1, #0\n"
-"       bne     1b"
-        : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
-        : "r" (&v->counter), "Ir" (i)
-        : "cc");
-
-        smp_mb();
-
-        return result;
-}
-
-static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
-{
-        unsigned long oldval, res;
-
-        smp_mb();
-
-        do {
-                __asm__ __volatile__("@ atomic_cmpxchg\n"
-                "ldrex  %1, [%3]\n"
-                "mov    %0, #0\n"
-                "teq    %1, %4\n"
-                "strexeq %0, %5, [%3]\n"
-                    : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
-                    : "r" (&ptr->counter), "Ir" (old), "r" (new)
-                    : "cc");
-        } while (res);
-
-        smp_mb();
-
-        return oldval;
-}
-
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
-{
-        unsigned long tmp, tmp2;
-
-        __asm__ __volatile__("@ atomic_clear_mask\n"
-"1:     ldrex   %0, [%3]\n"
-"       bic     %0, %0, %4\n"
-"       strex   %1, %0, [%3]\n"
-"       teq     %1, #0\n"
-"       bne     1b"
-        : "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
-        : "r" (addr), "Ir" (mask)
-        : "cc");
-}
-
-#define atomic_inc(v)           atomic_add(1, v)
-#define atomic_dec(v)           atomic_sub(1, v)
-
-#define atomic_inc_and_test(v)  (atomic_add_return(1, v) == 0)
-#define atomic_dec_and_test(v)  (atomic_sub_return(1, v) == 0)
-#define atomic_inc_return(v)    (atomic_add_return(1, v))
-#define atomic_dec_return(v)    (atomic_sub_return(1, v))
-#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
-
-#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
+#if defined(CONFIG_ARM_32)
+# include <asm/arm32/atomic.h>
+#elif defined(CONFIG_ARM_64)
+# include <asm/arm64/atomic.h>
+#else
+# error "unknown ARM variant"
+#endif
 
 static inline atomic_t atomic_compareandswap(
     atomic_t old, atomic_t new, atomic_t *v)
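
For clarity on the accessor change in asm-arm/atomic.h: with the new
width argument, read_u8_atomic expands on arm64 to roughly

    asm volatile("ldrb %w0,%1"
                 : "=r" (ret)
                 : "m" (*(volatile uint8_t *)addr));

where the "w" operand modifier selects the 32-bit view of the register;
on arm32 the modifier strings are empty, so the generated code is
unchanged from before this patch.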
-- 
1.7.2.5

