
[Xen-devel] [PATCH v4 1/7] asm-arm/atomic.h: fix arm32/arm64 macro duplication



Move the macros duplicated between asm-arm/arm32/atomic.h and
asm-arm/arm64/atomic.h into the common asm-arm/atomic.h, and adjust
README.LinuxPrimitives in the process. Also fix a few blank lines in the
touched headers.

Signed-off-by: Corneliu ZUZU <czuzu@xxxxxxxxxxxxxxx>
Reviewed-by: Stefano Stabellini <sstabellini@xxxxxxxxxx>
---
Changed since v3:
  * update README.LinuxPrimitives file
---
 xen/arch/arm/README.LinuxPrimitives | 20 ++++++++++++++++++++
 xen/include/asm-arm/arm32/atomic.h  | 15 ++-------------
 xen/include/asm-arm/arm64/atomic.h  | 15 ++-------------
 xen/include/asm-arm/atomic.h        | 14 ++++++++++++++
 4 files changed, 38 insertions(+), 26 deletions(-)
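
[Not part of the patch; review context only.] After this change the common
header keeps the per-arch primitives behind the existing CONFIG_ARM_32 /
CONFIG_ARM_64 dispatch and defines the shared wrappers once. Roughly (a
sketch abridged from the headers touched below, not a verbatim copy):

    /* xen/include/asm-arm/atomic.h, abridged sketch */
    #if defined(CONFIG_ARM_32)
    # include <asm/arm32/atomic.h>   /* LDREX/STREX-based primitives */
    #elif defined(CONFIG_ARM_64)
    # include <asm/arm64/atomic.h>   /* LDXR/STXR-based primitives */
    #else
    # error "unknown ARM variant"
    #endif

    /* Wrappers previously duplicated in both per-arch headers: */
    #define atomic_inc(v)       atomic_add(1, v)
    #define atomic_dec(v)       atomic_sub(1, v)
    #define atomic_xchg(v, new) (xchg(&((v)->counter), new))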

diff --git a/xen/arch/arm/README.LinuxPrimitives b/xen/arch/arm/README.LinuxPrimitives
index 3115f51..4906593 100644
--- a/xen/arch/arm/README.LinuxPrimitives
+++ b/xen/arch/arm/README.LinuxPrimitives
@@ -23,6 +23,16 @@ atomics: last sync @ v3.16-rc6 (last commit: 8715466b6027)
 
 linux/arch/arm64/include/asm/atomic.h   xen/include/asm-arm/arm64/atomic.h
 
+The following functions were taken from Linux:
+    atomic_add(), atomic_add_return(), atomic_sub(), atomic_sub_return(),
+    atomic_cmpxchg(), __atomic_add_unless()
+
+The following macros were also taken from Linux and have since been moved to asm-arm/atomic.h:
+    atomic_xchg(v, new), atomic_inc(v), atomic_dec(v),
+    atomic_inc_and_test(v), atomic_dec_and_test(v),
+    atomic_inc_return(v), atomic_dec_return(v),
+    atomic_sub_and_test(i, v), atomic_add_negative(i,v)
+
 ---------------------------------------------------------------------
 
 mem*: last sync @ v3.16-rc6 (last commit: d875c9b37240)
@@ -91,6 +101,16 @@ atomics: last sync @ v3.16-rc6 (last commit: 030d0178bdbd)
 
 linux/arch/arm/include/asm/atomic.h     xen/include/asm-arm/arm32/atomic.h
 
+The following functions were taken from Linux:
+    atomic_add(), atomic_add_return(), atomic_sub(), atomic_sub_return(),
+    atomic_cmpxchg(), __atomic_add_unless()
+
+The following macros were also taken from Linux and have since been moved to asm-arm/atomic.h:
+    atomic_xchg(v, new), atomic_inc(v), atomic_dec(v),
+    atomic_inc_and_test(v), atomic_dec_and_test(v),
+    atomic_inc_return(v), atomic_dec_return(v),
+    atomic_sub_and_test(i, v), atomic_add_negative(i,v)
+
 ---------------------------------------------------------------------
 
 mem*: last sync @ v3.16-rc6 (last commit: d98b90ea22b0)
diff --git a/xen/include/asm-arm/arm32/atomic.h b/xen/include/asm-arm/arm32/atomic.h
index 7ec712f..78de60f 100644
--- a/xen/include/asm-arm/arm32/atomic.h
+++ b/xen/include/asm-arm/arm32/atomic.h
@@ -8,6 +8,7 @@
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
  */
+
 #ifndef __ARCH_ARM_ARM32_ATOMIC__
 #define __ARCH_ARM_ARM32_ATOMIC__
 
@@ -147,20 +148,8 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
        return oldval;
 }
 
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
-#define atomic_inc(v)          atomic_add(1, v)
-#define atomic_dec(v)          atomic_sub(1, v)
-
-#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
-#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
-#define atomic_inc_return(v)    (atomic_add_return(1, v))
-#define atomic_dec_return(v)    (atomic_sub_return(1, v))
-#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
-
-#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
-
 #endif /* __ARCH_ARM_ARM32_ATOMIC__ */
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/include/asm-arm/arm64/atomic.h b/xen/include/asm-arm/arm64/atomic.h
index b49219e..d640bef 100644
--- a/xen/include/asm-arm/arm64/atomic.h
+++ b/xen/include/asm-arm/arm64/atomic.h
@@ -19,6 +19,7 @@
  * You should have received a copy of the GNU General Public License
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
+
 #ifndef __ARCH_ARM_ARM64_ATOMIC
 #define __ARCH_ARM_ARM64_ATOMIC
 
@@ -113,8 +114,6 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
        return oldval;
 }
 
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
        int c, old;
@@ -125,18 +124,8 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
        return c;
 }
 
-#define atomic_inc(v)          atomic_add(1, v)
-#define atomic_dec(v)          atomic_sub(1, v)
-
-#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
-#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
-#define atomic_inc_return(v)    (atomic_add_return(1, v))
-#define atomic_dec_return(v)    (atomic_sub_return(1, v))
-#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
-
-#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
-
 #endif
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/include/asm-arm/atomic.h b/xen/include/asm-arm/atomic.h
index 29ab265..32771e9 100644
--- a/xen/include/asm-arm/atomic.h
+++ b/xen/include/asm-arm/atomic.h
@@ -138,7 +138,21 @@ static inline void _atomic_set(atomic_t *v, int i)
 # error "unknown ARM variant"
 #endif
 
+#define atomic_inc(v)       atomic_add(1, v)
+#define atomic_dec(v)       atomic_sub(1, v)
+
+#define atomic_inc_and_test(v)  (atomic_add_return(1, v) == 0)
+#define atomic_dec_and_test(v)  (atomic_sub_return(1, v) == 0)
+#define atomic_inc_return(v)    (atomic_add_return(1, v))
+#define atomic_dec_return(v)    (atomic_sub_return(1, v))
+#define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
+
+#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)
+
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
 #endif /* __ARCH_ARM_ATOMIC__ */
+
 /*
  * Local variables:
  * mode: C
-- 
2.5.0
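
[Again not part of the patch.] As a quick sanity check of the consolidated
wrapper semantics, here is a self-contained user-space analogue. GCC's
__atomic builtins stand in for the per-arch atomic_add()/atomic_add_return()/
atomic_sub_return() (an assumption for illustration only; Xen's real
implementations stay in the arm32/arm64 headers):

    /* sketch.c -- build with: gcc -o sketch sketch.c */
    #include <assert.h>

    typedef struct { int counter; } atomic_t;

    /* Stand-ins for the per-arch primitives (illustration only). */
    static inline void atomic_add(int i, atomic_t *v)
    { (void)__atomic_add_fetch(&v->counter, i, __ATOMIC_SEQ_CST); }
    static inline int atomic_add_return(int i, atomic_t *v)
    { return __atomic_add_fetch(&v->counter, i, __ATOMIC_SEQ_CST); }
    static inline int atomic_sub_return(int i, atomic_t *v)
    { return __atomic_sub_fetch(&v->counter, i, __ATOMIC_SEQ_CST); }

    /* The wrappers now defined once in asm-arm/atomic.h: */
    #define atomic_inc(v)            atomic_add(1, v)
    #define atomic_dec_and_test(v)   (atomic_sub_return(1, v) == 0)
    #define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)

    int main(void)
    {
        atomic_t refcnt = { .counter = 1 };
        atomic_inc(&refcnt);                      /* 1 -> 2 */
        assert(!atomic_dec_and_test(&refcnt));    /* 2 -> 1, not zero */
        assert(atomic_dec_and_test(&refcnt));     /* 1 -> 0, hits zero */
        assert(atomic_add_negative(-1, &refcnt)); /* 0 -> -1, negative */
        return 0;
    }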

