[Minios-devel] [UNIKRAFT PATCH v5 10/15] include/uk: prefix functions in bitmap.h and bitops.h
This patch adds a uk_/UK_ prefix to the majority of the macros and
functions. For the sake of tractability, the patch touches only
functions that are either not used (yet) or used no more than a
couple of times.
Signed-off-by: Yuri Volchkov <yuri.volchkov@xxxxxxxxx>
Reviewed-by: Florian Schmidt <florian.schmidt@xxxxxxxxx>
---
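A minimal usage sketch of the renamed bitmap API (illustrative only,
not part of the patch; MAP_BITS and example_alloc are made-up names):

/* Illustrative sketch; assumes the post-patch names in uk/bitmap.h. */
#include <uk/bitmap.h>
#include <uk/bitops.h>

#define MAP_BITS 128

static unsigned long map[BITS_TO_LONGS(MAP_BITS)];

static int example_alloc(void)
{
	unsigned int pos;

	uk_bitmap_zero(map, MAP_BITS);	/* clear all 128 bits */
	uk_bitmap_set(map, 0, 16);	/* mark bits 0..15 as used */

	/* look for an 8-bit-aligned run of 8 zero bits */
	pos = uk_bitmap_find_next_zero_area(map, MAP_BITS, 0, 8, 7);
	if (pos >= MAP_BITS)
		return -1;		/* no free area left */
	uk_bitmap_set(map, pos, 8);	/* claim it */
	return (int)pos;
}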
include/uk/bitmap.h | 40 +++++++++++++++----------------
include/uk/bitops.h | 58 +++++++++++++++++++++++----------------------
2 files changed, 50 insertions(+), 48 deletions(-)
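Likewise for the bit helpers in uk/bitops.h: the plain names are
atomic and imply barriers, the underscored ones are not (see the
comment updated in the diff below). Again illustrative only; flags
and example_bits are made-up names:

/* Illustrative sketch; assumes the post-patch names in uk/bitops.h. */
#include <uk/bitops.h>

static unsigned long flags[BITS_TO_LONGS(64)];

static void example_bits(void)
{
	unsigned long bit;

	uk_set_bit(3, flags);		/* atomic, acts as a barrier */
	__uk_set_bit(4, flags);		/* non-atomic, no barrier */

	if (!uk_test_and_set_bit(5, flags)) {
		/* bit 5 was clear before this call; we now own it */
	}

	uk_for_each_set_bit(bit, flags, 64) {
		/* visits bits 3, 4 and 5 */
	}
}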
diff --git a/include/uk/bitmap.h b/include/uk/bitmap.h
index 13178b6..cd0f098 100644
--- a/include/uk/bitmap.h
+++ b/include/uk/bitmap.h
@@ -33,13 +33,13 @@
#include <uk/bitops.h>
static inline void
-bitmap_zero(unsigned long *addr, const unsigned int size)
+uk_bitmap_zero(unsigned long *addr, const unsigned int size)
{
memset(addr, 0, BITS_TO_LONGS(size) * sizeof(long));
}
static inline void
-bitmap_fill(unsigned long *addr, const unsigned int size)
+uk_bitmap_fill(unsigned long *addr, const unsigned int size)
{
const unsigned int tail = size & (BITS_PER_LONG - 1);
@@ -50,7 +50,7 @@ bitmap_fill(unsigned long *addr, const unsigned int size)
}
static inline int
-bitmap_full(unsigned long *addr, const unsigned int size)
+uk_bitmap_full(unsigned long *addr, const unsigned int size)
{
const unsigned int end = BIT_WORD(size);
const unsigned int tail = size & (BITS_PER_LONG - 1);
@@ -71,7 +71,7 @@ bitmap_full(unsigned long *addr, const unsigned int size)
}
static inline int
-bitmap_empty(unsigned long *addr, const unsigned int size)
+uk_bitmap_empty(unsigned long *addr, const unsigned int size)
{
const unsigned int end = BIT_WORD(size);
const unsigned int tail = size & (BITS_PER_LONG - 1);
@@ -92,7 +92,7 @@ bitmap_empty(unsigned long *addr, const unsigned int size)
}
static inline void
-bitmap_set(unsigned long *map, unsigned int start, int nr)
+uk_bitmap_set(unsigned long *map, unsigned int start, int nr)
{
const unsigned int size = start + nr;
int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
@@ -115,7 +115,7 @@ bitmap_set(unsigned long *map, unsigned int start, int nr)
}
static inline void
-bitmap_clear(unsigned long *map, unsigned int start, int nr)
+uk_bitmap_clear(unsigned long *map, unsigned int start, int nr)
{
const unsigned int size = start + nr;
int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
@@ -138,7 +138,7 @@ bitmap_clear(unsigned long *map, unsigned int start, int nr)
}
static inline unsigned int
-bitmap_find_next_zero_area_off(const unsigned long *map,
+uk_bitmap_find_next_zero_area_off(const unsigned long *map,
const unsigned int size, unsigned int start,
unsigned int nr, unsigned int align_mask,
unsigned int align_offset)
@@ -148,7 +148,7 @@ bitmap_find_next_zero_area_off(const unsigned long *map,
unsigned int i;
retry:
- index = find_next_zero_bit(map, size, start);
+ index = uk_find_next_zero_bit(map, size, start);
index = (((index + align_offset) + align_mask) & ~align_mask) -
align_offset;
@@ -157,7 +157,7 @@ retry:
if (end > size)
return (end);
- i = find_next_bit(map, end, index);
+ i = uk_find_next_bit(map, end, index);
if (i < end) {
start = i + 1;
goto retry;
@@ -166,16 +166,16 @@ retry:
}
static inline unsigned int
-bitmap_find_next_zero_area(const unsigned long *map,
+uk_bitmap_find_next_zero_area(const unsigned long *map,
const unsigned int size, unsigned int start,
unsigned int nr, unsigned int align_mask)
{
- return (bitmap_find_next_zero_area_off(map, size,
+ return (uk_bitmap_find_next_zero_area_off(map, size,
start, nr, align_mask, 0));
}
static inline int
-bitmap_find_free_region(unsigned long *bitmap, int bits, int order)
+uk_bitmap_find_free_region(unsigned long *bitmap, int bits, int order)
{
int pos;
int end;
@@ -190,7 +190,7 @@ bitmap_find_free_region(unsigned long *bitmap, int bits, int order)
}
static inline int
-bitmap_allocate_region(unsigned long *bitmap, int pos, int order)
+uk_bitmap_allocate_region(unsigned long *bitmap, int pos, int order)
{
if (!linux_reg_op(bitmap, pos, order, REG_OP_ISFREE))
return (-EBUSY);
@@ -199,13 +199,13 @@ bitmap_allocate_region(unsigned long *bitmap, int pos, int order)
}
static inline void
-bitmap_release_region(unsigned long *bitmap, int pos, int order)
+uk_bitmap_release_region(unsigned long *bitmap, int pos, int order)
{
linux_reg_op(bitmap, pos, order, REG_OP_RELEASE);
}
static inline unsigned int
-bitmap_weight(unsigned long *addr, const unsigned int size)
+uk_bitmap_weight(unsigned long *addr, const unsigned int size)
{
const unsigned int end = BIT_WORD(size);
const unsigned int tail = size & (BITS_PER_LONG - 1);
@@ -224,7 +224,7 @@ bitmap_weight(unsigned long *addr, const unsigned int size)
}
static inline int
-bitmap_equal(const unsigned long *pa,
+uk_bitmap_equal(const unsigned long *pa,
const unsigned long *pb, unsigned int size)
{
const unsigned int end = BIT_WORD(size);
@@ -246,7 +246,7 @@ bitmap_equal(const unsigned long *pa,
}
static inline void
-bitmap_complement(unsigned long *dst, const unsigned long *src,
+uk_bitmap_complement(unsigned long *dst, const unsigned long *src,
const unsigned int size)
{
const unsigned int end = BITS_TO_LONGS(size);
@@ -257,7 +257,7 @@ bitmap_complement(unsigned long *dst, const unsigned long *src,
}
static inline void
-bitmap_or(unsigned long *dst, const unsigned long *src1,
+uk_bitmap_or(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, const unsigned int size)
{
const unsigned int end = BITS_TO_LONGS(size);
@@ -268,7 +268,7 @@ bitmap_or(unsigned long *dst, const unsigned long *src1,
}
static inline void
-bitmap_and(unsigned long *dst, const unsigned long *src1,
+uk_bitmap_and(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, const unsigned int size)
{
const unsigned int end = BITS_TO_LONGS(size);
@@ -279,7 +279,7 @@ bitmap_and(unsigned long *dst, const unsigned long *src1,
}
static inline void
-bitmap_xor(unsigned long *dst, const unsigned long *src1,
+uk_bitmap_xor(unsigned long *dst, const unsigned long *src1,
const unsigned long *src2, const unsigned int size)
{
const unsigned int end = BITS_TO_LONGS(size);
diff --git a/include/uk/bitops.h b/include/uk/bitops.h
index e0a563a..8555022 100644
--- a/include/uk/bitops.h
+++ b/include/uk/bitops.h
@@ -39,8 +39,9 @@
#include <uk/arch/lcpu.h>
#include <uk/arch/atomic.h>
-#define BIT(nr) (1UL << (nr))
-#define BIT_ULL(nr) (1ULL << (nr))
+#define UK_BIT(nr) (1UL << (nr))
+#define UK_BIT_ULL(nr) (1ULL << (nr))
+
#ifdef __LP64__
#define BITS_PER_LONG 64
#else
@@ -52,11 +53,12 @@
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG))
#define BITMAP_LAST_WORD_MASK(n) (~0UL >> (BITS_PER_LONG - (n)))
#define BITS_TO_LONGS(n) howmany((n), BITS_PER_LONG)
-#define BIT_MASK(nr) (1UL << ((nr) & (BITS_PER_LONG - 1)))
+#define UK_BIT_MASK(nr) \
+ (1UL << ((nr) & (BITS_PER_LONG - 1)))
#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
-#define GENMASK(h, l) \
+#define UK_GENMASK(h, l) \
(((~0UL) >> (BITS_PER_LONG - (h) - 1)) & ((~0UL) << (l)))
-#define GENMASK_ULL(h, l) \
+#define UK_GENMASK_ULL(h, l) \
(((~0ULL) >> (BITS_PER_LONG_LONG - (h) - 1)) & ((~0ULL) << (l)))
#define BITS_PER_BYTE 8
@@ -75,12 +77,12 @@ fls64(__u64 mask)
#endif
static inline __u32
-ror32(__u32 word, unsigned int shift)
+uk_ror32(__u32 word, unsigned int shift)
{
return ((word >> shift) | (word << (32 - shift)));
}
-static inline int get_count_order(unsigned int count)
+static inline int uk_get_count_order(unsigned int count)
{
int order;
@@ -91,7 +93,7 @@ static inline int get_count_order(unsigned int count)
}
static inline unsigned long
-find_first_bit(const unsigned long *addr, unsigned long size)
+uk_find_first_bit(const unsigned long *addr, unsigned long size)
{
long mask;
int bit;
@@ -113,7 +115,7 @@ find_first_bit(const unsigned long *addr, unsigned long size)
}
static inline unsigned long
-find_first_zero_bit(const unsigned long *addr, unsigned long size)
+uk_find_first_zero_bit(const unsigned long *addr, unsigned long size)
{
long mask;
int bit;
@@ -135,7 +137,7 @@ find_first_zero_bit(const unsigned long *addr, unsigned long size)
}
static inline unsigned long
-find_last_bit(const unsigned long *addr, unsigned long size)
+uk_find_last_bit(const unsigned long *addr, unsigned long size)
{
long mask;
int offs;
@@ -161,7 +163,7 @@ find_last_bit(const unsigned long *addr, unsigned long size)
}
static inline unsigned long
-find_next_bit(const unsigned long *addr, unsigned long size,
+uk_find_next_bit(const unsigned long *addr, unsigned long size,
unsigned long offset)
{
long mask;
@@ -201,7 +203,7 @@ find_next_bit(const unsigned long *addr, unsigned long size,
}
static inline unsigned long
-find_next_zero_bit(const unsigned long *addr, unsigned long size,
+uk_find_next_zero_bit(const unsigned long *addr, unsigned long size,
unsigned long offset)
{
long mask;
@@ -240,35 +242,35 @@ find_next_zero_bit(const unsigned long *addr, unsigned long size,
return (bit);
}
-/* set_bit and clear_bit are atomic and protected against
+/* uk_set_bit and uk_clear_bit are atomic and protected against
* reordering (do barriers), while the underscored (__*) versions of
* them don't (not atomic).
*/
-#define __set_bit(i, a) ukarch_set_bit(i, a)
-#define set_bit(i, a) ukarch_set_bit_sync(i, a)
-#define __clear_bit(i, a) ukarch_clr_bit(i, a)
-#define clear_bit(i, a) ukarch_clr_bit_sync(i, a)
+#define __uk_set_bit(i, a) ukarch_set_bit(i, a)
+#define uk_set_bit(i, a) ukarch_set_bit_sync(i, a)
+#define __uk_clear_bit(i, a) ukarch_clr_bit(i, a)
+#define uk_clear_bit(i, a) ukarch_clr_bit_sync(i, a)
static inline int
-test_and_clear_bit(long bit, volatile unsigned long *var)
+uk_test_and_clear_bit(long bit, volatile unsigned long *var)
{
return ukarch_test_and_clr_bit_sync(bit, (volatile void *) var);
}
static inline int
-__test_and_clear_bit(long bit, volatile unsigned long *var)
+__uk_test_and_clear_bit(long bit, volatile unsigned long *var)
{
return ukarch_test_and_clr_bit(bit, (volatile void *) var);
}
static inline int
-test_and_set_bit(long bit, volatile unsigned long *var)
+uk_test_and_set_bit(long bit, volatile unsigned long *var)
{
return ukarch_test_and_set_bit_sync(bit, (volatile void *) var);
}
static inline int
-__test_and_set_bit(long bit, volatile unsigned long *var)
+__uk_test_and_set_bit(long bit, volatile unsigned long *var)
{
return ukarch_test_and_set_bit(bit, (volatile void *) var);
}
@@ -324,18 +326,18 @@ done:
return ret;
}
-#define for_each_set_bit(bit, addr, size) \
- for ((bit) = find_first_bit((addr), (size)); \
+#define uk_for_each_set_bit(bit, addr, size) \
+ for ((bit) = uk_find_first_bit((addr), (size)); \
(bit) < (size); \
- (bit) = find_next_bit((addr), (size), (bit) + 1))
+ (bit) = uk_find_next_bit((addr), (size), (bit) + 1))
-#define for_each_clear_bit(bit, addr, size) \
- for ((bit) = find_first_zero_bit((addr), (size)); \
+#define uk_for_each_clear_bit(bit, addr, size) \
+ for ((bit) = uk_find_first_zero_bit((addr), (size)); \
(bit) < (size); \
- (bit) = find_next_zero_bit((addr), (size), (bit) + 1))
+ (bit) = uk_find_next_zero_bit((addr), (size), (bit) + 1))
static inline __u64
-sign_extend64(__u64 value, int index)
+uk_sign_extend64(__u64 value, int index)
{
__u8 shift = 63 - index;
--
2.18.0