[PATCH] xen: Consistently use alignof()



We have a mix of all three spellings (alignof(), __alignof() and __alignof__())
in Xen, as well as compatibility definitions in compiler.h for older C
standards.

Remove the uses of __alignof() and __alignof__(), which reduces code volume a
little.
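
With GCC and Clang (which Xen requires) all three spellings report the same
alignment for struct types, so the replacement is purely textual.  A
standalone illustration (not part of this patch):

    /* Compile with e.g. gcc -std=c11 -c; <stdalign.h> provides alignof pre-C23. */
    #include <stdalign.h>

    struct demo { char c; long l; };

    _Static_assert(alignof(struct demo) == __alignof__(struct demo),
                   "alignof and __alignof__ agree");
    _Static_assert(alignof(struct demo) == __alignof(struct demo),
                   "alignof and __alignof agree");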

No functional change.

Signed-off-by: Andrew Cooper <andrew.cooper3@xxxxxxxxxx>
---
CC: Jan Beulich <JBeulich@xxxxxxxx>
CC: Roger Pau Monné <roger.pau@xxxxxxxxxx>
CC: Stefano Stabellini <sstabellini@xxxxxxxxxx>
CC: Julien Grall <julien@xxxxxxx>
CC: Volodymyr Babchuk <Volodymyr_Babchuk@xxxxxxxx>
CC: Bertrand Marquis <bertrand.marquis@xxxxxxx>
CC: Michal Orzel <michal.orzel@xxxxxxx>

x86_emulate.c and compat-build-header.py still use __alignof() because they
contain code that is also used outside of Xen.

The compiler.h compatibility check is for < C11, whereas alignof only became
a keyword in C23 (C11 merely provides it as a macro in <stdalign.h>).  Nothing
seems to mind, so I've left it alone.
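
A shim of the sort described above could be as simple as the sketch below;
this is an illustration only, not the verbatim compiler.h code.

    /* Illustrative fallback for toolchain modes older than C11. */
    #if !defined(__STDC_VERSION__) || __STDC_VERSION__ < 201112L
    # define alignof(x) __alignof__(x)   /* GCC/Clang extension */
    #endif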
---
 xen/arch/x86/xstate.c                |  4 ++--
 xen/common/coverage/gcc_3_4.c        |  4 ++--
 xen/common/device-tree/device-tree.c |  8 ++++----
 xen/include/xen/config.h             |  2 +-
 xen/include/xen/percpu.h             |  2 +-
 xen/include/xen/xmalloc.h            | 16 ++++++++--------
 xen/include/xen/xvmalloc.h           | 16 ++++++++--------
 7 files changed, 26 insertions(+), 26 deletions(-)

diff --git a/xen/arch/x86/xstate.c b/xen/arch/x86/xstate.c
index 3d249518a1b7..d423bf7978da 100644
--- a/xen/arch/x86/xstate.c
+++ b/xen/arch/x86/xstate.c
@@ -542,8 +542,8 @@ int xstate_alloc_save_area(struct vcpu *v)
     }
 
     /* XSAVE/XRSTOR requires the save area be 64-byte-boundary aligned. */
-    BUILD_BUG_ON(__alignof(*save_area) < 64);
-    save_area = _xvzalloc(size, __alignof(*save_area));
+    BUILD_BUG_ON(alignof(*save_area) < 64);
+    save_area = _xvzalloc(size, alignof(*save_area));
     if ( save_area == NULL )
         return -ENOMEM;
 
diff --git a/xen/common/coverage/gcc_3_4.c b/xen/common/coverage/gcc_3_4.c
index 3631f4bc2535..393e3c866d74 100644
--- a/xen/common/coverage/gcc_3_4.c
+++ b/xen/common/coverage/gcc_3_4.c
@@ -199,8 +199,8 @@ static size_t get_fn_size(const struct gcov_info *info)
 
     size = sizeof(struct gcov_fn_info) + num_counter_active(info) *
         sizeof(unsigned int);
-    if ( __alignof__(struct gcov_fn_info) > sizeof(unsigned int) )
-        size = ROUNDUP(size, __alignof__(struct gcov_fn_info));
+    if ( alignof(struct gcov_fn_info) > sizeof(unsigned int) )
+        size = ROUNDUP(size, alignof(struct gcov_fn_info));
     return size;
 }
 
diff --git a/xen/common/device-tree/device-tree.c b/xen/common/device-tree/device-tree.c
index 90fee2ba0315..e4dea4b42a1d 100644
--- a/xen/common/device-tree/device-tree.c
+++ b/xen/common/device-tree/device-tree.c
@@ -1885,7 +1885,7 @@ static unsigned long unflatten_dt_node(const void *fdt,
     }
 
     np = unflatten_dt_alloc(&mem, sizeof(struct dt_device_node) + allocl,
-                            __alignof__(struct dt_device_node));
+                            alignof(struct dt_device_node));
     if ( allnextpp )
     {
         memset(np, 0, sizeof(*np));
@@ -1963,7 +1963,7 @@ static unsigned long unflatten_dt_node(const void *fdt,
             has_name = 1;
         l = strlen(pname) + 1;
         pp = unflatten_dt_alloc(&mem, sizeof(struct dt_property),
-                                __alignof__(struct dt_property));
+                                alignof(struct dt_property));
         if ( allnextpp )
         {
             /* We accept flattened tree phandles either in
@@ -2010,7 +2010,7 @@ static unsigned long unflatten_dt_node(const void *fdt,
             pa = p1;
         sz = (pa - ps) + 1;
         pp = unflatten_dt_alloc(&mem, sizeof(struct dt_property) + sz,
-                                __alignof__(struct dt_property));
+                                alignof(struct dt_property));
         if ( allnextpp )
         {
             pp->name = "name";
@@ -2088,7 +2088,7 @@ int unflatten_device_tree(const void *fdt, struct dt_device_node **mynodes)
     dt_dprintk("  size is %#lx allocating...\n", size);
 
     /* Allocate memory for the expanded device tree */
-    mem = (unsigned long)_xmalloc (size + 4, __alignof__(struct dt_device_node));
+    mem = (unsigned long)_xmalloc(size + 4, alignof(struct dt_device_node));
     if ( !mem )
         return -ENOMEM;
 
diff --git a/xen/include/xen/config.h b/xen/include/xen/config.h
index 1d7195066c08..479c3bac8925 100644
--- a/xen/include/xen/config.h
+++ b/xen/include/xen/config.h
@@ -105,7 +105,7 @@
 #define BITS_PER_LONG   (BITS_PER_BYTE * BYTES_PER_LONG)
 #define BITS_PER_LLONG  (BITS_PER_BYTE * __SIZEOF_LONG_LONG__)
 
-/* It is assumed that sizeof(void *) == __alignof(void *) */
+/* It is assumed that sizeof(void *) == alignof(void *) */
 #define POINTER_ALIGN   __SIZEOF_POINTER__
 
 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
diff --git a/xen/include/xen/percpu.h b/xen/include/xen/percpu.h
index e7f585c7ed69..62a6259b2bc1 100644
--- a/xen/include/xen/percpu.h
+++ b/xen/include/xen/percpu.h
@@ -18,7 +18,7 @@
 
 #define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \
     typedef char name ## _chk_t \
-        [BUILD_BUG_ON_ZERO(__alignof(type) & (PAGE_SIZE - 1))]; \
+        [BUILD_BUG_ON_ZERO(alignof(type) & (PAGE_SIZE - 1))]; \
     __DEFINE_PER_CPU(__section(".bss.percpu.page_aligned"), \
                      type, _ ## name)
 
diff --git a/xen/include/xen/xmalloc.h b/xen/include/xen/xmalloc.h
index f0412fb4e021..b7616c3b9038 100644
--- a/xen/include/xen/xmalloc.h
+++ b/xen/include/xen/xmalloc.h
@@ -13,8 +13,8 @@
  */
 
 /* Allocate space for typed object. */
-#define xmalloc(_type) ((_type *)_xmalloc(sizeof(_type), __alignof__(_type)))
-#define xzalloc(_type) ((_type *)_xzalloc(sizeof(_type), __alignof__(_type)))
+#define xmalloc(_type) ((_type *)_xmalloc(sizeof(_type), alignof(_type)))
+#define xzalloc(_type) ((_type *)_xzalloc(sizeof(_type), alignof(_type)))
 
 /*
  * Allocate space for a typed object and copy an existing instance.
@@ -34,24 +34,24 @@
 
 /* Allocate space for array of typed objects. */
 #define xmalloc_array(_type, _num) \
-    ((_type *)_xmalloc_array(sizeof(_type), __alignof__(_type), _num))
+    ((_type *)_xmalloc_array(sizeof(_type), alignof(_type), _num))
 #define xzalloc_array(_type, _num) \
-    ((_type *)_xzalloc_array(sizeof(_type), __alignof__(_type), _num))
+    ((_type *)_xzalloc_array(sizeof(_type), alignof(_type), _num))
 #define xrealloc_array(_ptr, _num)                                  \
     ((typeof(_ptr))_xrealloc_array(_ptr, sizeof(typeof(*(_ptr))),   \
-                                   __alignof__(typeof(*(_ptr))), _num))
+                                   alignof(typeof(*(_ptr))), _num))
 
 /* Allocate space for a structure with a flexible array of typed objects. */
 #define xzalloc_flex_struct(type, field, nr) \
-    ((type *)_xzalloc(offsetof(type, field[nr]), __alignof__(type)))
+    ((type *)_xzalloc(offsetof(type, field[nr]), alignof(type)))
 
 #define xmalloc_flex_struct(type, field, nr) \
-    ((type *)_xmalloc(offsetof(type, field[nr]), __alignof__(type)))
+    ((type *)_xmalloc(offsetof(type, field[nr]), alignof(type)))
 
 /* Re-allocate space for a structure with a flexible array of typed objects. */
 #define xrealloc_flex_struct(ptr, field, nr)                           \
     ((typeof(ptr))_xrealloc(ptr, offsetof(typeof(*(ptr)), field[nr]),  \
-                            __alignof__(typeof(*(ptr)))))
+                            alignof(typeof(*(ptr)))))
 
 /* Allocate untyped storage. */
 #define xmalloc_bytes(_bytes) _xmalloc(_bytes, SMP_CACHE_BYTES)
diff --git a/xen/include/xen/xvmalloc.h b/xen/include/xen/xvmalloc.h
index 7686d49f8154..d2288c175814 100644
--- a/xen/include/xen/xvmalloc.h
+++ b/xen/include/xen/xvmalloc.h
@@ -10,13 +10,13 @@
  */
 
 /* Allocate space for typed object. */
-#define xvmalloc(_type) ((_type *)_xvmalloc(sizeof(_type), __alignof__(_type)))
-#define xvzalloc(_type) ((_type *)_xvzalloc(sizeof(_type), __alignof__(_type)))
+#define xvmalloc(_type) ((_type *)_xvmalloc(sizeof(_type), alignof(_type)))
+#define xvzalloc(_type) ((_type *)_xvzalloc(sizeof(_type), alignof(_type)))
 
 /* Allocate space for a typed object and copy an existing instance. */
 #define xvmemdup(ptr)                                          \
 ({                                                             \
-    void *p_ = _xvmalloc(sizeof(*(ptr)), __alignof__(*(ptr))); \
+    void *p_ = _xvmalloc(sizeof(*(ptr)), alignof(*(ptr))); \
     if ( p_ )                                                  \
         memcpy(p_, ptr, sizeof(*(ptr)));                       \
     (typeof(*(ptr)) *)p_;                                      \
@@ -24,21 +24,21 @@
 
 /* Allocate space for array of typed objects. */
 #define xvmalloc_array(_type, _num) \
-    ((_type *)_xvmalloc_array(sizeof(_type), __alignof__(_type), _num))
+    ((_type *)_xvmalloc_array(sizeof(_type), alignof(_type), _num))
 #define xvzalloc_array(_type, _num) \
-    ((_type *)_xvzalloc_array(sizeof(_type), __alignof__(_type), _num))
+    ((_type *)_xvzalloc_array(sizeof(_type), alignof(_type), _num))
 
 /* Allocate space for a structure with a flexible array of typed objects. */
 #define xvzalloc_flex_struct(type, field, nr) \
-    ((type *)_xvzalloc(offsetof(type, field[nr]), __alignof__(type)))
+    ((type *)_xvzalloc(offsetof(type, field[nr]), alignof(type)))
 
 #define xvmalloc_flex_struct(type, field, nr) \
-    ((type *)_xvmalloc(offsetof(type, field[nr]), __alignof__(type)))
+    ((type *)_xvmalloc(offsetof(type, field[nr]), alignof(type)))
 
 /* Re-allocate space for a structure with a flexible array of typed objects. */
 #define xvrealloc_flex_struct(ptr, field, nr)                          \
     ((typeof(ptr))_xvrealloc(ptr, offsetof(typeof(*(ptr)), field[nr]), \
-                             __alignof__(typeof(*(ptr)))))
+                             alignof(typeof(*(ptr)))))
 
 #ifdef CONFIG_HAS_VMAP
 
-- 
2.39.5