Re: [PATCH v5 2/5] xen/ppc: Implement bitops.h
On 12.09.2023 20:35, Shawn Anastasio wrote:
> Implement bitops.h, based on Linux's implementation as of commit
> 5321d1b1afb9a17302c6cec79f0cbf823eb0d3fc. Though it is based off of
> Linux's implementation, this code diverges significantly in a number of
> ways:
> - Bitmap entries changed to 32-bit words to match X86 and Arm on Xen
> - PPC32-specific code paths dropped
> - Formatting completely re-done to more closely line up with Xen.
> Including 4 space indentation.
> - Use GCC's __builtin_popcount* for hweight* implementation
>
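For reference, the __builtin_popcount* bullet above amounts to roughly the
following (a sketch based on GCC's documented built-ins; the exact casts
and spelling in the patch itself may differ):

    /* Rough sketch only, not a verbatim quote of the patch. */
    #define hweight64(x) __builtin_popcountll(x)
    #define hweight32(x) __builtin_popcount(x)
    #define hweight16(x) __builtin_popcount((uint16_t)(x))
    #define hweight8(x)  __builtin_popcount((uint8_t)(x))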
> Signed-off-by: Shawn Anastasio <sanastasio@xxxxxxxxxxxxxxxxxxxxx>
> Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
> ---
> v5:
> - Switch lingering ulong bitop parameters/return values to uint.
>
> v4:
> - Mention __builtin_popcount implementation of hweight* in message
> - Change type of BITOP_MASK expression from unsigned long to unsigned
> int
> - Fix remaining underscore-prefixed macro params/vars
> - Fix line wrapping in test_and_clear_bit{,s}
> - Change type of test_and_clear_bits' pointer parameter from void *
> to unsigned int *. This was already done for other functions but
> missed here.
> - De-macroize test_and_set_bits.
> - Fix formatting of ffs{,l} macro's unary operators
> - Drop extra blank in ffz() macro definition
>
> v3:
> - Fix style of inline asm blocks.
> - Fix underscore-prefixed macro parameters/variables
> - Use __builtin_popcount for hweight* implementation
> - Update C functions to use proper Xen coding style
>
> v2:
> - Clarify changes from Linux implementation in commit message
> - Use PPC_ATOMIC_{ENTRY,EXIT}_BARRIER macros from <asm/memory.h> instead
> of hardcoded "sync" instructions in inline assembly blocks.
> - Fix macro line-continuing backslash spacing
> - Fix hard-tab usage in find_*_bit C functions.
>
> xen/arch/ppc/include/asm/bitops.h | 332 +++++++++++++++++++++++++++++-
> 1 file changed, 329 insertions(+), 3 deletions(-)
>
> diff --git a/xen/arch/ppc/include/asm/bitops.h b/xen/arch/ppc/include/asm/bitops.h
> index 548e97b414..0f75ff3f9d 100644
> --- a/xen/arch/ppc/include/asm/bitops.h
> +++ b/xen/arch/ppc/include/asm/bitops.h
> @@ -1,9 +1,335 @@
> +/* SPDX-License-Identifier: GPL-2.0-or-later */
> +/*
> + * Adapted from Linux's arch/powerpc/include/asm/bitops.h.
> + *
> + * Merged version by David Gibson <david@xxxxxxxxxxxxxxxxxxxxx>.
> + * Based on ppc64 versions by: Dave Engebretsen, Todd Inglett, Don
> + * Reed, Pat McCarthy, Peter Bergner, Anton Blanchard. They
> + * originally took it from the ppc32 code.
> + */
> #ifndef _ASM_PPC_BITOPS_H
> #define _ASM_PPC_BITOPS_H
>
> +#include <asm/memory.h>
> +
> +#define __set_bit(n, p) set_bit(n, p)
> +#define __clear_bit(n, p) clear_bit(n, p)
> +
> +#define BITOP_BITS_PER_WORD 32
> +#define BITOP_MASK(nr) (1U << ((nr) % BITOP_BITS_PER_WORD))
> +#define BITOP_WORD(nr) ((nr) / BITOP_BITS_PER_WORD)
> +#define BITS_PER_BYTE 8
> +
> /* PPC bit number conversion */
> -#define PPC_BITLSHIFT(be) (BITS_PER_LONG - 1 - (be))
> -#define PPC_BIT(bit) (1UL << PPC_BITLSHIFT(bit))
> -#define PPC_BITMASK(bs, be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))
> +#define PPC_BITLSHIFT(be)    (BITS_PER_LONG - 1 - (be))
> +#define PPC_BIT(bit)         (1UL << PPC_BITLSHIFT(bit))
> +#define PPC_BITMASK(bs, be)  ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))
> +
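(Side note: these macros use IBM/big-endian bit numbering, i.e. bit 0 is
the most significant bit of the long. A quick illustration, assuming
BITS_PER_LONG == 64; these checks are not part of the patch:)

    /* PPC_BIT(0) is the MSB; PPC_BITMASK(bs, be) covers bits bs..be
     * inclusive in IBM numbering. */
    BUILD_BUG_ON(PPC_BIT(0) != 0x8000000000000000UL);
    BUILD_BUG_ON(PPC_BIT(63) != 0x1UL);
    BUILD_BUG_ON(PPC_BITMASK(0, 1) != 0xC000000000000000UL);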
> +/* Macro for generating the ***_bits() functions */
> +#define DEFINE_BITOP(fn, op, prefix)                                       \
> +static inline void fn(unsigned int mask,                                   \
> +                      volatile unsigned int *p_)                           \
> +{                                                                          \
> +    unsigned int old;                                                      \
> +    unsigned int *p = (unsigned int *)p_;                                  \
What use is this, when ...
> +    asm volatile ( prefix                                                  \
> +                   "1: lwarx %0,0,%3,0\n"                                  \
> +                   #op "%I2 %0,%0,%2\n"                                    \
> +                   "stwcx. %0,0,%3\n"                                      \
> +                   "bne- 1b\n"                                             \
> +                   : "=&r" (old), "+m" (*p)                                \
... the "+m" operand isn't used and ...
> + : "rK" (mask), "r" (p)
> \
> + : "cc", "memory" );
> \
... there's a memory clobber anyway?
Also (nit) note that the long -> int change has caused some of the
backslashes to no longer align.
> +}
> +
> +DEFINE_BITOP(set_bits, or, "")
> +DEFINE_BITOP(change_bits, xor, "")
Are there further plans to add uses of DEFINE_BITOP() with the last argument
not an empty string? If not, how about simplifying things by dropping the
macro parameter?
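For illustration, folding in both of the above remarks (dropping the unused
"+m" operand, and with it the cast, plus the always-empty prefix parameter)
would leave something like the sketch below. Operand numbers shift once the
"+m" output is gone; this is only a sketch of the suggestion, not a form
anyone has committed to:

    /*
     * Sketch: %0 = old, %1 = mask, %2 = p. The "memory" clobber already
     * keeps the compiler from caching *p across the asm, so neither the
     * "+m" operand nor the cast of p_ is needed.
     */
    #define DEFINE_BITOP(fn, op)                                        \
    static inline void fn(unsigned int mask, volatile unsigned int *p)  \
    {                                                                   \
        unsigned int old;                                               \
                                                                        \
        asm volatile ( "1: lwarx %0,0,%2,0\n"                           \
                       #op "%I1 %0,%0,%1\n"                             \
                       "stwcx. %0,0,%2\n"                               \
                       "bne- 1b\n"                                      \
                       : "=&r" (old)                                    \
                       : "rK" (mask), "r" (p)                           \
                       : "cc", "memory" );                              \
    }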
> +#define DEFINE_CLROP(fn, prefix)                                           \
> +static inline void fn(unsigned int mask, volatile unsigned int *p_)        \
> +{                                                                          \
> +    unsigned int old;                                                      \
> +    unsigned int *p = (unsigned int *)p_;                                  \
> +    asm volatile ( prefix                                                  \
> +                   "1: lwarx %0,0,%3,0\n"                                  \
> +                   "andc %0,%0,%2\n"                                       \
> +                   "stwcx. %0,0,%3\n"                                      \
> +                   "bne- 1b\n"                                             \
> +                   : "=&r" (old), "+m" (*p)                                \
> +                   : "r" (mask), "r" (p)                                   \
> +                   : "cc", "memory" );                                     \
> +}
> +
> +DEFINE_CLROP(clear_bits, "")
Same question here.
> +static inline void set_bit(int nr, volatile void *addr)
> +{
> +    set_bits(BITOP_MASK(nr), (volatile unsigned int *)addr + BITOP_WORD(nr));
> +}
> +static inline void clear_bit(int nr, volatile void *addr)
> +{
> +    clear_bits(BITOP_MASK(nr), (volatile unsigned int *)addr +
> +               BITOP_WORD(nr));
> +}
> +
> +/**
> + * test_bit - Determine whether a bit is set
> + * @nr: bit number to test
> + * @addr: Address to start counting from
> + */
> +static inline int test_bit(int nr, const volatile void *addr)
> +{
> +    const volatile unsigned int *p = addr;
> +    return 1 & (p[BITOP_WORD(nr)] >> (nr & (BITOP_BITS_PER_WORD - 1)));
> +}
> +
> +static inline unsigned int test_and_clear_bits(
> +    unsigned int mask,
> +    volatile unsigned int *p)
> +{
> +    unsigned int old, t;
> +
> +    asm volatile ( PPC_ATOMIC_ENTRY_BARRIER
> +                   "1: lwarx %0,0,%3,0\n"
> +                   "andc %1,%0,%2\n"
> +                   "stwcx. %1,0,%3\n"
> +                   "bne- 1b\n"
> +                   PPC_ATOMIC_EXIT_BARRIER
> +                   : "=&r" (old), "=&r" (t)
> +                   : "r" (mask), "r" (p)
> +                   : "cc", "memory" );
> +
> +    return (old & mask);
> +}
> +
> +static inline int test_and_clear_bit(unsigned int nr,
> +                                     volatile void *addr)
> +{
> +    return test_and_clear_bits(BITOP_MASK(nr), (volatile unsigned int *)addr +
> +                               BITOP_WORD(nr)) != 0;
Didn't you indicate you'd adjust the odd wrapping?
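(For comparison, one more conventional way to wrap this, purely
illustrative, breaks after the first argument rather than splitting an
already-full line:)

    static inline int test_and_clear_bit(unsigned int nr, volatile void *addr)
    {
        return test_and_clear_bits(BITOP_MASK(nr),
                                   (volatile unsigned int *)addr +
                                   BITOP_WORD(nr)) != 0;
    }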
> +}
> +
> +static inline unsigned int test_and_set_bits(
> +    unsigned int mask,
> +    volatile unsigned int *p)
> +{
> +    unsigned int old, t;
> +
> +    asm volatile ( PPC_ATOMIC_ENTRY_BARRIER
> +                   "1: lwarx %0,0,%3,0\n"
> +                   "or%I2 %1,%0,%2\n"
> +                   "stwcx. %1,0,%3\n"
> +                   "bne- 1b\n"
> +                   PPC_ATOMIC_EXIT_BARRIER
> +                   : "=&r" (old), "=&r" (t)
> +                   : "rK" (mask), "r" (p)
> +                   : "cc", "memory" );
> +
> +    return (old & mask);
> +}
> +
> +static inline int test_and_set_bit(unsigned int nr, volatile void *addr)
> +{
> +    return test_and_set_bits(BITOP_MASK(nr), (volatile unsigned int *)addr +
> +                             BITOP_WORD(nr)) != 0;
Same here then.
Jan