
[Xen-changelog] [xen master] xen: arm64: basic config and types headers



commit ae01113535e9c1f7220e72ebe602030dd82b4fb0
Author:     Ian Campbell <ian.campbell@xxxxxxxxxx>
AuthorDate: Fri Feb 22 08:57:45 2013 +0000
Commit:     Ian Campbell <ian.campbell@xxxxxxxxxx>
CommitDate: Fri Feb 22 12:14:51 2013 +0000

    xen: arm64: basic config and types headers
    
    The 64-bit bitops are taken from the Linux asm-generic implementations. They
    should be replaced with optimised versions from the Linux arm64 port when
    they become available.
    
    Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
    Acked-by: Tim Deegan <tim@xxxxxxx>
---
 xen/arch/arm/arm64/Makefile            |    2 +
 xen/arch/arm/arm64/lib/Makefile        |    1 +
 xen/arch/arm/arm64/lib/bitops.c        |   22 +++
 xen/arch/arm/arm64/lib/find_next_bit.c |  284 ++++++++++++++++++++++++++++++++
 xen/include/asm-arm/arm32/bitops.h     |   54 ++++++
 xen/include/asm-arm/arm64/bitops.h     |  283 +++++++++++++++++++++++++++++++
 xen/include/asm-arm/bitops.h           |   65 ++------
 xen/include/asm-arm/config.h           |   15 ++
 xen/include/asm-arm/types.h            |   17 ++-
 9 files changed, 686 insertions(+), 57 deletions(-)
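
The atomic bitops added for arm64 below take their atomicity from a small
hash of spinlocks, one lock per cacheline-sized address range, rather than
from exclusive load/store sequences; hashing on the address keeps unrelated
bitmaps from contending on one global lock. A minimal user-space sketch of
that hashed-lock technique, with pthread mutexes standing in for Xen's
spinlock_t (the harness, lock type and cacheline size are assumptions, not
Xen code):

    /* Hashed-lock atomic set_bit sketch; compile with -pthread. */
    #include <pthread.h>
    #include <stdio.h>

    #define BITS_PER_LONG    (8 * sizeof(unsigned long))
    #define L1_CACHE_BYTES   64   /* assumed cacheline size */
    #define ATOMIC_HASH_SIZE 4
    #define ATOMIC_HASH(a) \
        (&atomic_hash[((unsigned long)(a) / L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE - 1)])

    static pthread_mutex_t atomic_hash[ATOMIC_HASH_SIZE] = {
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER,
    };

    static void demo_set_bit(int nr, volatile unsigned long *addr)
    {
        volatile unsigned long *p = addr + nr / BITS_PER_LONG;
        unsigned long mask = 1UL << (nr % BITS_PER_LONG);

        pthread_mutex_lock(ATOMIC_HASH(p));   /* serialise the read-modify-write */
        *p |= mask;
        pthread_mutex_unlock(ATOMIC_HASH(p));
    }

    int main(void)
    {
        unsigned long bitmap[2] = { 0, 0 };
        demo_set_bit(3, bitmap);
        demo_set_bit(BITS_PER_LONG + 1, bitmap);
        printf("%#lx %#lx\n", bitmap[0], bitmap[1]);   /* 0x8 0x2 */
        return 0;
    }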

diff --git a/xen/arch/arm/arm64/Makefile b/xen/arch/arm/arm64/Makefile
index dffbeb1..c447eaa 100644
--- a/xen/arch/arm/arm64/Makefile
+++ b/xen/arch/arm/arm64/Makefile
@@ -1 +1,3 @@
+subdir-y += lib
+
 obj-y += mode_switch.o
diff --git a/xen/arch/arm/arm64/lib/Makefile b/xen/arch/arm/arm64/lib/Makefile
new file mode 100644
index 0000000..32c02c4
--- /dev/null
+++ b/xen/arch/arm/arm64/lib/Makefile
@@ -0,0 +1 @@
+obj-y += bitops.o find_next_bit.o
diff --git a/xen/arch/arm/arm64/lib/bitops.c b/xen/arch/arm/arm64/lib/bitops.c
new file mode 100644
index 0000000..02d8d78
--- /dev/null
+++ b/xen/arch/arm/arm64/lib/bitops.c
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2012 ARM Limited
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <xen/spinlock.h>
+#include <xen/bitops.h>
+
+spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] /*__lock_aligned*/ = {
+       [0 ... (ATOMIC_HASH_SIZE-1)]  = SPIN_LOCK_UNLOCKED
+};
diff --git a/xen/arch/arm/arm64/lib/find_next_bit.c b/xen/arch/arm/arm64/lib/find_next_bit.c
new file mode 100644
index 0000000..aea69c2
--- /dev/null
+++ b/xen/arch/arm/arm64/lib/find_next_bit.c
@@ -0,0 +1,284 @@
+/* find_next_bit.c: fallback find next bit implementation
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@xxxxxxxxxx)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <xen/config.h>
+#include <xen/bitops.h>
+#include <asm/types.h>
+#include <asm/byteorder.h>
+
+#define BITOP_WORD(nr)         ((nr) / BITS_PER_LONG)
+
+#ifndef find_next_bit
+/*
+ * Find the next set bit in a memory region.
+ */
+unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
+                           unsigned long offset)
+{
+       const unsigned long *p = addr + BITOP_WORD(offset);
+       unsigned long result = offset & ~(BITS_PER_LONG-1);
+       unsigned long tmp;
+
+       if (offset >= size)
+               return size;
+       size -= result;
+       offset %= BITS_PER_LONG;
+       if (offset) {
+               tmp = *(p++);
+               tmp &= (~0UL << offset);
+               if (size < BITS_PER_LONG)
+                       goto found_first;
+               if (tmp)
+                       goto found_middle;
+               size -= BITS_PER_LONG;
+               result += BITS_PER_LONG;
+       }
+       while (size & ~(BITS_PER_LONG-1)) {
+               if ((tmp = *(p++)))
+                       goto found_middle;
+               result += BITS_PER_LONG;
+               size -= BITS_PER_LONG;
+       }
+       if (!size)
+               return result;
+       tmp = *p;
+
+found_first:
+       tmp &= (~0UL >> (BITS_PER_LONG - size));
+       if (tmp == 0UL)         /* Are any bits set? */
+               return result + size;   /* Nope. */
+found_middle:
+       return result + __ffs(tmp);
+}
+EXPORT_SYMBOL(find_next_bit);
+#endif
+
+#ifndef find_next_zero_bit
+/*
+ * This implementation of find_{first,next}_zero_bit was stolen from
+ * Linus' asm-alpha/bitops.h.
+ */
+unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
+                                unsigned long offset)
+{
+       const unsigned long *p = addr + BITOP_WORD(offset);
+       unsigned long result = offset & ~(BITS_PER_LONG-1);
+       unsigned long tmp;
+
+       if (offset >= size)
+               return size;
+       size -= result;
+       offset %= BITS_PER_LONG;
+       if (offset) {
+               tmp = *(p++);
+               tmp |= ~0UL >> (BITS_PER_LONG - offset);
+               if (size < BITS_PER_LONG)
+                       goto found_first;
+               if (~tmp)
+                       goto found_middle;
+               size -= BITS_PER_LONG;
+               result += BITS_PER_LONG;
+       }
+       while (size & ~(BITS_PER_LONG-1)) {
+               if (~(tmp = *(p++)))
+                       goto found_middle;
+               result += BITS_PER_LONG;
+               size -= BITS_PER_LONG;
+       }
+       if (!size)
+               return result;
+       tmp = *p;
+
+found_first:
+       tmp |= ~0UL << size;
+       if (tmp == ~0UL)        /* Are any bits zero? */
+               return result + size;   /* Nope. */
+found_middle:
+       return result + ffz(tmp);
+}
+EXPORT_SYMBOL(find_next_zero_bit);
+#endif
+
+#ifndef find_first_bit
+/*
+ * Find the first set bit in a memory region.
+ */
+unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
+{
+       const unsigned long *p = addr;
+       unsigned long result = 0;
+       unsigned long tmp;
+
+       while (size & ~(BITS_PER_LONG-1)) {
+               if ((tmp = *(p++)))
+                       goto found;
+               result += BITS_PER_LONG;
+               size -= BITS_PER_LONG;
+       }
+       if (!size)
+               return result;
+
+       tmp = (*p) & (~0UL >> (BITS_PER_LONG - size));
+       if (tmp == 0UL)         /* Are any bits set? */
+               return result + size;   /* Nope. */
+found:
+       return result + __ffs(tmp);
+}
+EXPORT_SYMBOL(find_first_bit);
+#endif
+
+#ifndef find_first_zero_bit
+/*
+ * Find the first cleared bit in a memory region.
+ */
+unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size)
+{
+       const unsigned long *p = addr;
+       unsigned long result = 0;
+       unsigned long tmp;
+
+       while (size & ~(BITS_PER_LONG-1)) {
+               if (~(tmp = *(p++)))
+                       goto found;
+               result += BITS_PER_LONG;
+               size -= BITS_PER_LONG;
+       }
+       if (!size)
+               return result;
+
+       tmp = (*p) | (~0UL << size);
+       if (tmp == ~0UL)        /* Are any bits zero? */
+               return result + size;   /* Nope. */
+found:
+       return result + ffz(tmp);
+}
+EXPORT_SYMBOL(find_first_zero_bit);
+#endif
+
+#ifdef __BIG_ENDIAN
+
+/* include/linux/byteorder does not support "unsigned long" type */
+static inline unsigned long ext2_swabp(const unsigned long * x)
+{
+#if BITS_PER_LONG == 64
+       return (unsigned long) __swab64p((u64 *) x);
+#elif BITS_PER_LONG == 32
+       return (unsigned long) __swab32p((u32 *) x);
+#else
+#error BITS_PER_LONG not defined
+#endif
+}
+
+/* include/linux/byteorder doesn't support "unsigned long" type */
+static inline unsigned long ext2_swab(const unsigned long y)
+{
+#if BITS_PER_LONG == 64
+       return (unsigned long) __swab64((u64) y);
+#elif BITS_PER_LONG == 32
+       return (unsigned long) __swab32((u32) y);
+#else
+#error BITS_PER_LONG not defined
+#endif
+}
+
+#ifndef find_next_zero_bit_le
+unsigned long find_next_zero_bit_le(const void *addr, unsigned
+               long size, unsigned long offset)
+{
+       const unsigned long *p = addr;
+       unsigned long result = offset & ~(BITS_PER_LONG - 1);
+       unsigned long tmp;
+
+       if (offset >= size)
+               return size;
+       p += BITOP_WORD(offset);
+       size -= result;
+       offset &= (BITS_PER_LONG - 1UL);
+       if (offset) {
+               tmp = ext2_swabp(p++);
+               tmp |= (~0UL >> (BITS_PER_LONG - offset));
+               if (size < BITS_PER_LONG)
+                       goto found_first;
+               if (~tmp)
+                       goto found_middle;
+               size -= BITS_PER_LONG;
+               result += BITS_PER_LONG;
+       }
+
+       while (size & ~(BITS_PER_LONG - 1)) {
+               if (~(tmp = *(p++)))
+                       goto found_middle_swap;
+               result += BITS_PER_LONG;
+               size -= BITS_PER_LONG;
+       }
+       if (!size)
+               return result;
+       tmp = ext2_swabp(p);
+found_first:
+       tmp |= ~0UL << size;
+       if (tmp == ~0UL)        /* Are any bits zero? */
+               return result + size; /* Nope. Skip ffz */
+found_middle:
+       return result + ffz(tmp);
+
+found_middle_swap:
+       return result + ffz(ext2_swab(tmp));
+}
+EXPORT_SYMBOL(find_next_zero_bit_le);
+#endif
+
+#ifndef find_next_bit_le
+unsigned long find_next_bit_le(const void *addr, unsigned
+               long size, unsigned long offset)
+{
+       const unsigned long *p = addr;
+       unsigned long result = offset & ~(BITS_PER_LONG - 1);
+       unsigned long tmp;
+
+       if (offset >= size)
+               return size;
+       p += BITOP_WORD(offset);
+       size -= result;
+       offset &= (BITS_PER_LONG - 1UL);
+       if (offset) {
+               tmp = ext2_swabp(p++);
+               tmp &= (~0UL << offset);
+               if (size < BITS_PER_LONG)
+                       goto found_first;
+               if (tmp)
+                       goto found_middle;
+               size -= BITS_PER_LONG;
+               result += BITS_PER_LONG;
+       }
+
+       while (size & ~(BITS_PER_LONG - 1)) {
+               tmp = *(p++);
+               if (tmp)
+                       goto found_middle_swap;
+               result += BITS_PER_LONG;
+               size -= BITS_PER_LONG;
+       }
+       if (!size)
+               return result;
+       tmp = ext2_swabp(p);
+found_first:
+       tmp &= (~0UL >> (BITS_PER_LONG - size));
+       if (tmp == 0UL)         /* Are any bits set? */
+               return result + size; /* Nope. */
+found_middle:
+       return result + __ffs(tmp);
+
+found_middle_swap:
+       return result + __ffs(ext2_swab(tmp));
+}
+EXPORT_SYMBOL(find_next_bit_le);
+#endif
+
+#endif /* __BIG_ENDIAN */
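
All of the finders above use "return size" to signal that no matching bit
exists, which gives callers a natural iteration idiom. A self-contained
sketch of that idiom; the bit-at-a-time helper here is a deliberately naive
stand-in for the word-at-a-time implementation above, and the demo_ names
are placeholders:

    #include <stdio.h>

    #define BITS_PER_LONG (8 * sizeof(unsigned long))

    /* Naive reference version with the same contract as find_next_bit(). */
    static unsigned long demo_find_next_bit(const unsigned long *addr,
                                            unsigned long size,
                                            unsigned long offset)
    {
        for (unsigned long i = offset; i < size; i++)
            if (addr[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG)))
                return i;
        return size;                       /* "no more set bits" */
    }

    int main(void)
    {
        unsigned long map[1] = { 0x92 };   /* bits 1, 4 and 7 set */
        unsigned long size = 8, bit;

        for (bit = demo_find_next_bit(map, size, 0); bit < size;
             bit = demo_find_next_bit(map, size, bit + 1))
            printf("bit %lu is set\n", bit);
        return 0;
    }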
diff --git a/xen/include/asm-arm/arm32/bitops.h b/xen/include/asm-arm/arm32/bitops.h
new file mode 100644
index 0000000..0d05258
--- /dev/null
+++ b/xen/include/asm-arm/arm32/bitops.h
@@ -0,0 +1,54 @@
+#ifndef _ARM_ARM32_BITOPS_H
+#define _ARM_ARM32_BITOPS_H
+
+extern void _set_bit(int nr, volatile void * p);
+extern void _clear_bit(int nr, volatile void * p);
+extern void _change_bit(int nr, volatile void * p);
+extern int _test_and_set_bit(int nr, volatile void * p);
+extern int _test_and_clear_bit(int nr, volatile void * p);
+extern int _test_and_change_bit(int nr, volatile void * p);
+
+#define set_bit(n,p)              _set_bit(n,p)
+#define clear_bit(n,p)            _clear_bit(n,p)
+#define change_bit(n,p)           _change_bit(n,p)
+#define test_and_set_bit(n,p)     _test_and_set_bit(n,p)
+#define test_and_clear_bit(n,p)   _test_and_clear_bit(n,p)
+#define test_and_change_bit(n,p)  _test_and_change_bit(n,p)
+
+/*
+ * Little endian assembly bitops.  nr = 0 -> byte 0 bit 0.
+ */
+extern int _find_first_zero_bit_le(const void * p, unsigned size);
+extern int _find_next_zero_bit_le(const void * p, int size, int offset);
+extern int _find_first_bit_le(const unsigned long *p, unsigned size);
+extern int _find_next_bit_le(const unsigned long *p, int size, int offset);
+
+/*
+ * Big endian assembly bitops.  nr = 0 -> byte 3 bit 0.
+ */
+extern int _find_first_zero_bit_be(const void * p, unsigned size);
+extern int _find_next_zero_bit_be(const void * p, int size, int offset);
+extern int _find_first_bit_be(const unsigned long *p, unsigned size);
+extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
+
+#ifndef __ARMEB__
+/*
+ * These are the little endian, atomic definitions.
+ */
+#define find_first_zero_bit(p,sz)      _find_first_zero_bit_le(p,sz)
+#define find_next_zero_bit(p,sz,off)   _find_next_zero_bit_le(p,sz,off)
+#define find_first_bit(p,sz)           _find_first_bit_le(p,sz)
+#define find_next_bit(p,sz,off)                _find_next_bit_le(p,sz,off)
+
+#else
+/*
+ * These are the big endian, atomic definitions.
+ */
+#define find_first_zero_bit(p,sz)      _find_first_zero_bit_be(p,sz)
+#define find_next_zero_bit(p,sz,off)   _find_next_zero_bit_be(p,sz,off)
+#define find_first_bit(p,sz)           _find_first_bit_be(p,sz)
+#define find_next_bit(p,sz,off)                _find_next_bit_be(p,sz,off)
+
+#endif
+
+#endif /* _ARM_ARM32_BITOPS_H */
diff --git a/xen/include/asm-arm/arm64/bitops.h b/xen/include/asm-arm/arm64/bitops.h
new file mode 100644
index 0000000..847d65c
--- /dev/null
+++ b/xen/include/asm-arm/arm64/bitops.h
@@ -0,0 +1,283 @@
+#ifndef _ARM_ARM64_BITOPS_H
+#define _ARM_ARM64_BITOPS_H
+
+/* Generic bitop support. Based on linux/include/asm-generic/bitops/atomic.h */
+
+#include <xen/spinlock.h>
+#include <xen/cache.h>          /* we use L1_CACHE_BYTES */
+
+/* Use an array of spinlocks for our atomic_ts.
+ * Hash function to index into a different SPINLOCK.
+ * Since "a" is usually an address, use one spinlock per cacheline.
+ */
+#  define ATOMIC_HASH_SIZE 4
+#  define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
+
+extern spinlock_t __atomic_hash[ATOMIC_HASH_SIZE]/* __lock_aligned*/;
+
+#define _atomic_spin_lock_irqsave(l,f) do {     \
+       spinlock_t *s = ATOMIC_HASH(l);          \
+       spin_lock_irqsave(s, f);\
+} while(0)
+
+#define _atomic_spin_unlock_irqrestore(l,f) do {\
+        spinlock_t *s = ATOMIC_HASH(l);         \
+        spin_unlock_irqrestore(s,f);           \
+} while(0)
+
+#define FIXUP(_p, _mask)                        \
+    {                                           \
+        unsigned long __p = (unsigned long)_p;  \
+        if (__p & 0x7) {                        \
+            if (_mask > 0xffffffff) {           \
+             __p = (__p+32)&~0x7; _mask >>=32;  \
+            } else {                            \
+                __p &= ~0x7; _mask <<= 32;      \
+            }                                   \
+            if (0)printk("BITOPS: Fixup misaligned ptr %p => %#lx\n", _p, __p); \
+            _p = (void *)__p;                   \
+        }                                       \
+    }
+
+/**
+ * set_bit - Atomically set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This function is atomic and may not be reordered.  See __set_bit()
+ * if you do not require the atomic guarantees.
+ *
+ * Note: there are no guarantees that this function will not be reordered
+ * on non-x86 architectures, so if you are writing portable code,
+ * make sure not to rely on its reordering guarantees.
+ *
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+
+static inline void set_bit(int nr, volatile void *addr)
+{
+       unsigned long mask = BIT_MASK(nr);
+       unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+       unsigned long flags;
+
+        //printk("set_bit: nr %d addr %p mask %#lx p %p lock %p\n",
+        //       nr, addr, mask, p, ATOMIC_HASH(p));
+        FIXUP(p, mask);
+        //printk("set_bit: nr %d addr %p mask %#lx p %p lock %p\n",
+        //       nr, addr, mask, p, ATOMIC_HASH(p));
+        //printk("before *p is %#lx\n", *p);
+       _atomic_spin_lock_irqsave(p, flags);
+       *p  |= mask;
+       _atomic_spin_unlock_irqrestore(p, flags);
+        //printk(" after *p is %#lx\n", *p);
+}
+
+/**
+ * clear_bit - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit() is atomic and may not be reordered.  However, it does
+ * not contain a memory barrier, so if it is used for locking purposes,
+ * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * in order to ensure changes are visible on other processors.
+ */
+static inline void clear_bit(int nr, volatile void *addr)
+{
+       unsigned long mask = BIT_MASK(nr);
+       unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+       unsigned long flags;
+
+        FIXUP(p, mask);
+
+       _atomic_spin_lock_irqsave(p, flags);
+       *p &= ~mask;
+       _atomic_spin_unlock_irqrestore(p, flags);
+}
+
+/**
+ * change_bit - Toggle a bit in memory
+ * @nr: Bit to change
+ * @addr: Address to start counting from
+ *
+ * change_bit() is atomic and may not be reordered. It may be
+ * reordered on architectures other than x86.
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static inline void change_bit(int nr, volatile void *addr)
+{
+       unsigned long mask = BIT_MASK(nr);
+       unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+       unsigned long flags;
+
+        FIXUP(p, mask);
+
+       _atomic_spin_lock_irqsave(p, flags);
+       *p ^= mask;
+       _atomic_spin_unlock_irqrestore(p, flags);
+}
+
+/**
+ * test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It may be reordered on architectures other than x86.
+ * It also implies a memory barrier.
+ */
+static inline int test_and_set_bit(int nr, volatile void *addr)
+{
+       unsigned long mask = BIT_MASK(nr);
+       unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+       unsigned long old;
+       unsigned long flags;
+
+        FIXUP(p, mask);
+
+       _atomic_spin_lock_irqsave(p, flags);
+       old = *p;
+       *p = old | mask;
+       _atomic_spin_unlock_irqrestore(p, flags);
+
+       return (old & mask) != 0;
+}
+
+/**
+ * test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It can be reordered on architectures other than x86.
+ * It also implies a memory barrier.
+ */
+static inline int test_and_clear_bit(int nr, volatile void *addr)
+{
+       unsigned long mask = BIT_MASK(nr);
+       unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+       unsigned long old;
+       unsigned long flags;
+
+        FIXUP(p, mask);
+
+       _atomic_spin_lock_irqsave(p, flags);
+       old = *p;
+       *p = old & ~mask;
+       _atomic_spin_unlock_irqrestore(p, flags);
+
+       return (old & mask) != 0;
+}
+
+/**
+ * test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to change
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+static inline int test_and_change_bit(int nr, volatile void *addr)
+{
+       unsigned long mask = BIT_MASK(nr);
+       unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+       unsigned long old;
+       unsigned long flags;
+
+        FIXUP(p, mask);
+
+       _atomic_spin_lock_irqsave(p, flags);
+       old = *p;
+       *p = old ^ mask;
+       _atomic_spin_unlock_irqrestore(p, flags);
+
+       return (old & mask) != 0;
+}
+
+/* Based on linux/include/asm-generic/bitops/builtin-__ffs.h */
+/**
+ * __ffs - find first bit in word.
+ * @word: The word to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+static /*__*/always_inline unsigned long __ffs(unsigned long word)
+{
+        return __builtin_ctzl(word);
+}
+
+/* Based on linux/include/asm-generic/bitops/ffz.h */
+/*
+ * ffz - find first zero in word.
+ * @word: The word to search
+ *
+ * Undefined if no zero exists, so code should check against ~0UL first.
+ */
+#define ffz(x)  __ffs(~(x))
+
+
+
+/* Based on linux/include/asm-generic/bitops/find.h */
+
+#ifndef find_next_bit
+/**
+ * find_next_bit - find the next set bit in a memory region
+ * @addr: The address to base the search on
+ * @offset: The bitnumber to start searching at
+ * @size: The bitmap size in bits
+ */
+extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
+               size, unsigned long offset);
+#endif
+
+#ifndef find_next_zero_bit
+/**
+ * find_next_zero_bit - find the next cleared bit in a memory region
+ * @addr: The address to base the search on
+ * @offset: The bitnumber to start searching at
+ * @size: The bitmap size in bits
+ */
+extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned
+               long size, unsigned long offset);
+#endif
+
+#ifdef CONFIG_GENERIC_FIND_FIRST_BIT
+
+/**
+ * find_first_bit - find the first set bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum size to search
+ *
+ * Returns the bit number of the first set bit.
+ */
+extern unsigned long find_first_bit(const unsigned long *addr,
+                                   unsigned long size);
+
+/**
+ * find_first_zero_bit - find the first cleared bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum size to search
+ *
+ * Returns the bit number of the first cleared bit.
+ */
+extern unsigned long find_first_zero_bit(const unsigned long *addr,
+                                        unsigned long size);
+#else /* CONFIG_GENERIC_FIND_FIRST_BIT */
+
+#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
+#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
+
+#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */
+
+
+#endif /* _ARM_ARM64_BITOPS_H */
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
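
Note how ffz() in the header above is defined purely in terms of __ffs():
the first zero bit of a word is the first set bit of its complement, and
__ffs() itself is just GCC's count-trailing-zeros builtin. A standalone
check of that identity (demo_ prefixes are placeholders to stay out of the
reserved __ namespace and avoid libc's ffs()):

    #include <stdio.h>

    static inline unsigned long demo_ffs(unsigned long word)
    {
        return __builtin_ctzl(word);       /* undefined for word == 0 */
    }

    #define demo_ffz(x) demo_ffs(~(x))     /* first zero == first set bit of ~x */

    int main(void)
    {
        printf("__ffs(0x50) = %lu\n", demo_ffs(0x50));  /* 4 trailing zeros */
        printf("ffz(0x07)   = %lu\n", demo_ffz(0x07));  /* bits 0..2 set -> 3 */
        return 0;
    }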
diff --git a/xen/include/asm-arm/bitops.h b/xen/include/asm-arm/bitops.h
index 0d8ac9a..0a7caee 100644
--- a/xen/include/asm-arm/bitops.h
+++ b/xen/include/asm-arm/bitops.h
@@ -9,28 +9,14 @@
 #ifndef _ARM_BITOPS_H
 #define _ARM_BITOPS_H
 
-extern void _set_bit(int nr, volatile void * p);
-extern void _clear_bit(int nr, volatile void * p);
-extern void _change_bit(int nr, volatile void * p);
-extern int _test_and_set_bit(int nr, volatile void * p);
-extern int _test_and_clear_bit(int nr, volatile void * p);
-extern int _test_and_change_bit(int nr, volatile void * p);
-
-#define set_bit(n,p)              _set_bit(n,p)
-#define clear_bit(n,p)            _clear_bit(n,p)
-#define change_bit(n,p)           _change_bit(n,p)
-#define test_and_set_bit(n,p)     _test_and_set_bit(n,p)
-#define test_and_clear_bit(n,p)   _test_and_clear_bit(n,p)
-#define test_and_change_bit(n,p)  _test_and_change_bit(n,p)
-
 /*
  * Non-atomic bit manipulation.
  *
  * Implemented using atomics to be interrupt safe. Could alternatively
  * implement with local interrupt masking.
  */
-#define __set_bit(n,p)            _set_bit(n,p)
-#define __clear_bit(n,p)          _clear_bit(n,p)
+#define __set_bit(n,p)            set_bit(n,p)
+#define __clear_bit(n,p)          clear_bit(n,p)
 
 #define BIT(nr)                 (1UL << (nr))
 #define BIT_MASK(nr)            (1UL << ((nr) % BITS_PER_LONG))
@@ -40,6 +26,14 @@ extern int _test_and_change_bit(int nr, volatile void * p);
 #define ADDR (*(volatile long *) addr)
 #define CONST_ADDR (*(const volatile long *) addr)
 
+#if defined(CONFIG_ARM_32)
+# include <asm/arm32/bitops.h>
+#elif defined(CONFIG_ARM_64)
+# include <asm/arm64/bitops.h>
+#else
+# error "unknown ARM variant"
+#endif
+
 /**
  * __test_and_set_bit - Set a bit and return its old value
  * @nr: Bit to set
@@ -104,42 +98,6 @@ static inline int test_bit(int nr, const volatile void *addr)
         return 1UL & (p[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
 }
 
-/*
- * Little endian assembly bitops.  nr = 0 -> byte 0 bit 0.
- */
-extern int _find_first_zero_bit_le(const void * p, unsigned size);
-extern int _find_next_zero_bit_le(const void * p, int size, int offset);
-extern int _find_first_bit_le(const unsigned long *p, unsigned size);
-extern int _find_next_bit_le(const unsigned long *p, int size, int offset);
-
-/*
- * Big endian assembly bitops.  nr = 0 -> byte 3 bit 0.
- */
-extern int _find_first_zero_bit_be(const void * p, unsigned size);
-extern int _find_next_zero_bit_be(const void * p, int size, int offset);
-extern int _find_first_bit_be(const unsigned long *p, unsigned size);
-extern int _find_next_bit_be(const unsigned long *p, int size, int offset);
-
-#ifndef __ARMEB__
-/*
- * These are the little endian, atomic definitions.
- */
-#define find_first_zero_bit(p,sz)      _find_first_zero_bit_le(p,sz)
-#define find_next_zero_bit(p,sz,off)   _find_next_zero_bit_le(p,sz,off)
-#define find_first_bit(p,sz)           _find_first_bit_le(p,sz)
-#define find_next_bit(p,sz,off)                _find_next_bit_le(p,sz,off)
-
-#else
-/*
- * These are the big endian, atomic definitions.
- */
-#define find_first_zero_bit(p,sz)      _find_first_zero_bit_be(p,sz)
-#define find_next_zero_bit(p,sz,off)   _find_next_zero_bit_be(p,sz,off)
-#define find_first_bit(p,sz)           _find_first_bit_be(p,sz)
-#define find_next_bit(p,sz,off)                _find_next_bit_be(p,sz,off)
-
-#endif
-
 static inline int constant_fls(int x)
 {
         int r = 32;
@@ -182,10 +140,11 @@ static inline int fls(int x)
                return constant_fls(x);
 
         asm("clz\t%0, %1" : "=r" (ret) : "r" (x));
-        ret = 32 - ret;
+        ret = BITS_PER_LONG - ret;
         return ret;
 }
 
+
 #define ffs(x) ({ unsigned long __t = (x); fls(__t & -__t); })
 
 /**
diff --git a/xen/include/asm-arm/config.h b/xen/include/asm-arm/config.h
index 5a1ea1d..3910dd2 100644
--- a/xen/include/asm-arm/config.h
+++ b/xen/include/asm-arm/config.h
@@ -7,6 +7,21 @@
 #ifndef __ARM_CONFIG_H__
 #define __ARM_CONFIG_H__
 
+#if defined(__aarch64__)
+# define CONFIG_ARM_64 1
+#elif defined(__arm__)
+# define CONFIG_ARM_32 1
+#endif
+
+#if defined(CONFIG_ARM_64)
+# define LONG_BYTEORDER 3
+#else
+# define LONG_BYTEORDER 2
+#endif
+
+#define BYTES_PER_LONG (1 << LONG_BYTEORDER)
+#define BITS_PER_LONG (BYTES_PER_LONG << 3)
+
 #define CONFIG_PAGING_ASSISTANCE 1
 
 #define CONFIG_PAGING_LEVELS 3
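
LONG_BYTEORDER above is log2 of sizeof(long), so the derived constants come
out to 8 bytes / 64 bits on arm64 and 4 bytes / 32 bits on arm32. A quick
standalone check of the shift arithmetic for the arm64 case (the assert
harness is illustrative only, not part of the patch):

    #include <assert.h>

    #define LONG_BYTEORDER 3                    /* the CONFIG_ARM_64 case */
    #define BYTES_PER_LONG (1 << LONG_BYTEORDER)
    #define BITS_PER_LONG  (BYTES_PER_LONG << 3)

    int main(void)
    {
        assert(BYTES_PER_LONG == 8);            /* 1 << 3 */
        assert(BITS_PER_LONG  == 64);           /* 8 << 3 */
        return 0;
    }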
diff --git a/xen/include/asm-arm/types.h b/xen/include/asm-arm/types.h
index 19231ef..3f6317b 100644
--- a/xen/include/asm-arm/types.h
+++ b/xen/include/asm-arm/types.h
@@ -15,8 +15,13 @@ typedef __signed__ int __s32;
 typedef unsigned int __u32;
 
 #if defined(__GNUC__) && !defined(__STRICT_ANSI__)
+#if defined(CONFIG_ARM_32)
 typedef __signed__ long long __s64;
 typedef unsigned long long __u64;
+#elif defined (CONFIG_ARM_64)
+typedef __signed__ long __s64;
+typedef unsigned long __u64;
+#endif
 #endif
 
 typedef signed char s8;
@@ -28,11 +33,19 @@ typedef unsigned short u16;
 typedef signed int s32;
 typedef unsigned int u32;
 
+#if defined(CONFIG_ARM_32)
 typedef signed long long s64;
 typedef unsigned long long u64;
 typedef u64 paddr_t;
 #define INVALID_PADDR (~0ULL)
 #define PRIpaddr "016llx"
+#elif defined (CONFIG_ARM_64)
+typedef signed long s64;
+typedef unsigned long u64;
+typedef u64 paddr_t;
+#define INVALID_PADDR (~0UL)
+#define PRIpaddr "016lx"
+#endif
 
 typedef unsigned long size_t;
 
@@ -42,10 +55,6 @@ typedef char bool_t;
 
 #endif /* __ASSEMBLY__ */
 
-#define BITS_PER_LONG 32
-#define BYTES_PER_LONG 4
-#define LONG_BYTEORDER 2
-
 #endif /* __ARM_TYPES_H__ */
 /*
  * Local variables:
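
The per-architecture PRIpaddr exists because u64 (and hence paddr_t) is
"unsigned long long" on arm32 but plain "unsigned long" on arm64, so the
printk length modifier has to differ even though the printed width does
not. A hypothetical user-space analogue of the same pattern (assumes an
LP64 toolchain for the 64-bit branch; demo_u64 is a placeholder name):

    #include <stdio.h>
    #include <stdint.h>

    #if UINTPTR_MAX > 0xffffffffUL              /* 64-bit: mirrors CONFIG_ARM_64 */
    typedef unsigned long demo_u64;
    #define PRIpaddr "016lx"
    #else                                       /* 32-bit: mirrors CONFIG_ARM_32 */
    typedef unsigned long long demo_u64;
    #define PRIpaddr "016llx"
    #endif
    typedef demo_u64 paddr_t;

    int main(void)
    {
        paddr_t pa = 0x40000000;
        printf("paddr %" PRIpaddr "\n", pa);    /* one call site, both widths */
        return 0;
    }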
--
generated by git-patchbot for /home/xen/git/xen.git#master
