
[Xen-devel] [PATCH 16/45] xen: arm64: barriers and wait for interrupts/events

Provide the SEV/WFE/WFI instruction wrappers and the memory barrier
macros for both 32-bit and 64-bit ARM. The existing generic barrier
definitions move out of asm-arm/system.h into new per-subarchitecture
headers, selected via CONFIG_ARM_32/CONFIG_ARM_64; the arm64 variants
use the architecture's qualified barriers (dsb sy, dmb ish and so on).

Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
---
 xen/include/asm-arm/arm32/system.h |   29 +++++++++++++++++++++++++++++
 xen/include/asm-arm/arm64/system.h |   28 ++++++++++++++++++++++++++++
 xen/include/asm-arm/system.h       |   20 ++++++++------------
 3 files changed, 65 insertions(+), 12 deletions(-)
 create mode 100644 xen/include/asm-arm/arm32/system.h
 create mode 100644 xen/include/asm-arm/arm64/system.h

diff --git a/xen/include/asm-arm/arm32/system.h b/xen/include/asm-arm/arm32/system.h
new file mode 100644
index 0000000..91098a0
--- /dev/null
+++ b/xen/include/asm-arm/arm32/system.h
@@ -0,0 +1,29 @@
+/* Portions taken from Linux arch arm */
+#ifndef __ASM_ARM32_SYSTEM_H
+#define __ASM_ARM32_SYSTEM_H
+
+#define sev() __asm__ __volatile__ ("sev" : : : "memory")
+#define wfe() __asm__ __volatile__ ("wfe" : : : "memory")
+#define wfi() __asm__ __volatile__ ("wfi" : : : "memory")
+
+#define isb() __asm__ __volatile__ ("isb" : : : "memory")
+#define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
+#define dmb() __asm__ __volatile__ ("dmb" : : : "memory")
+
+#define mb()            dsb()
+#define rmb()           dsb()
+#define wmb()           mb()
+
+#define smp_mb()        dmb()
+#define smp_rmb()       dmb()
+#define smp_wmb()       dmb()
+
+#endif
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-arm/arm64/system.h b/xen/include/asm-arm/arm64/system.h
new file mode 100644
index 0000000..33c031d
--- /dev/null
+++ b/xen/include/asm-arm/arm64/system.h
@@ -0,0 +1,28 @@
+/* Portions taken from Linux arch arm64 */
+#ifndef __ASM_ARM64_SYSTEM_H
+#define __ASM_ARM64_SYSTEM_H
+
+#define sev()           asm volatile("sev" : : : "memory")
+#define wfe()           asm volatile("wfe" : : : "memory")
+#define wfi()           asm volatile("wfi" : : : "memory")
+
+#define isb()           asm volatile("isb" : : : "memory")
+#define dsb()           asm volatile("dsb sy" : : : "memory")
+
+#define mb()            dsb()
+#define rmb()           asm volatile("dsb ld" : : : "memory")
+#define wmb()           asm volatile("dsb st" : : : "memory")
+
+#define smp_mb()        asm volatile("dmb ish" : : : "memory")
+#define smp_rmb()       asm volatile("dmb ishld" : : : "memory")
+#define smp_wmb()       asm volatile("dmb ishst" : : : "memory")
+
+#endif
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-arm/system.h b/xen/include/asm-arm/system.h
index 216ef1f..8b4c97a 100644
--- a/xen/include/asm-arm/system.h
+++ b/xen/include/asm-arm/system.h
@@ -11,18 +11,6 @@
 #define xchg(ptr,x) \
         ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
 
-#define isb() __asm__ __volatile__ ("isb" : : : "memory")
-#define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
-#define dmb() __asm__ __volatile__ ("dmb" : : : "memory")
-
-#define mb()            dsb()
-#define rmb()           dsb()
-#define wmb()           mb()
-
-#define smp_mb()        dmb()
-#define smp_rmb()       dmb()
-#define smp_wmb()       dmb()
-
 /*
  * This is used to ensure the compiler did actually allocate the register we
  * asked it for some inline assembly sequences.  Apparently we can't trust
@@ -33,6 +21,14 @@
  */
 #define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"
 
+#if defined(CONFIG_ARM_32)
+# include <asm/arm32/system.h>
+#elif defined(CONFIG_ARM_64)
+# include <asm/arm64/system.h>
+#else
+# error "unknown ARM variant"
+#endif
+
 extern void __bad_xchg(volatile void *, int);
 
 static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
-- 
1.7.2.5
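
For readers unfamiliar with these primitives, a minimal sketch of how they
pair up across CPUs follows. It is illustrative only and not code from this
series; 'ready', 'payload', producer() and consumer() are made-up names.

    /* Hypothetical producer/consumer sketch using the macros above. */
    static volatile int ready;
    static int payload;

    void producer(void)
    {
        payload = 42;       /* publish the data ...                    */
        smp_wmb();          /* ... before the flag can become visible  */
        ready = 1;
        dsb();              /* complete the store before signalling    */
        sev();              /* wake any CPUs sleeping in WFE           */
    }

    int consumer(void)
    {
        while ( !ready )    /* WFE can wake spuriously, so re-check    */
            wfe();          /* doze until an event or interrupt        */
        smp_rmb();          /* order the flag read before the data     */
        return payload;
    }

wfi() is the analogous wait-for-interrupt: it would typically sit in an idle
loop with interrupts enabled, so that a pending interrupt wakes the CPU.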