Re: [Xen-devel] [PATCH V2 14/46] xen: arm64: barriers and wait for interrupts/events
On Thu, 2013-02-21 at 15:27 +0000, Ian Campbell wrote:
> On Thu, 2013-02-21 at 15:01 +0000, Tim Deegan wrote:
> > At 16:47 +0000 on 14 Feb (1360860448), Ian Campbell wrote:
> > > Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
> >
> > Acked-by: Tim Deegan <tim@xxxxxxx>
> >
> > Were we also talking about having smp_ barriers equivalent to the normal
> > ones, like on x86?
>
> Yes, I think in a F2F conversation which is why I forgot.
FYI it ended up like this. I retained your Ack, hope that's ok.
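
For readers following the thread: the distinction the patch draws is that the
mandatory barriers (mb/rmb/wmb) map to dsb, which also waits for completion,
while the smp_ variants map to the cheaper dmb, which only enforces ordering
between observers. A rough sketch of the kind of caller that only needs the
smp_ variants follows; the producer/consumer names are invented for
illustration and are not part of the patch.

/* Illustrative only: a made-up single-producer/single-consumer handoff,
 * not part of the patch. Only ordering against another CPU is needed,
 * so the smp_ barriers (dmb) are sufficient; the full dsb behind
 * mb()/wmb() would be overkill here. */
static int payload;
static volatile int ready;

static void producer(void)
{
    payload = 42;   /* write the data ...                       */
    smp_wmb();      /* ... and make it visible before the flag  */
    ready = 1;      /* publish                                  */
}

static int consumer(void)
{
    while ( !ready )
        ;           /* spin until the producer publishes        */
    smp_rmb();      /* order the flag read before the data read */
    return payload;
}
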
8<--------------------------------
From 117f08d439bca2798db71b9971429e32424ad092 Mon Sep 17 00:00:00 2001
From: Ian Campbell <ian.campbell@xxxxxxxxxx>
Date: Thu, 13 Dec 2012 13:18:07 +0000
Subject: [PATCH] xen: arm64: barriers and wait for interrupts/events
Signed-off-by: Ian Campbell <ian.campbell@xxxxxxxxxx>
Acked-by: Tim Deegan <tim@xxxxxxx>
---
v3: - smp barriers are the same as the UP ones (which are conservative)
- add dmb
---
xen/include/asm-arm/arm32/system.h | 29 +++++++++++++++++++++++++++++
xen/include/asm-arm/arm64/system.h | 29 +++++++++++++++++++++++++++++
xen/include/asm-arm/system.h | 20 ++++++++------------
3 files changed, 66 insertions(+), 12 deletions(-)
create mode 100644 xen/include/asm-arm/arm32/system.h
create mode 100644 xen/include/asm-arm/arm64/system.h
diff --git a/xen/include/asm-arm/arm32/system.h b/xen/include/asm-arm/arm32/system.h
new file mode 100644
index 0000000..91098a0
--- /dev/null
+++ b/xen/include/asm-arm/arm32/system.h
@@ -0,0 +1,29 @@
+/* Portions taken from Linux arch arm */
+#ifndef __ASM_ARM32_SYSTEM_H
+#define __ASM_ARM32_SYSTEM_H
+
+#define sev() __asm__ __volatile__ ("sev" : : : "memory")
+#define wfe() __asm__ __volatile__ ("wfe" : : : "memory")
+#define wfi() __asm__ __volatile__ ("wfi" : : : "memory")
+
+#define isb() __asm__ __volatile__ ("isb" : : : "memory")
+#define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
+#define dmb() __asm__ __volatile__ ("dmb" : : : "memory")
+
+#define mb() dsb()
+#define rmb() dsb()
+#define wmb() mb()
+
+#define smp_mb() dmb()
+#define smp_rmb() dmb()
+#define smp_wmb() dmb()
+
+#endif
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-arm/arm64/system.h b/xen/include/asm-arm/arm64/system.h
new file mode 100644
index 0000000..b3ea4a3
--- /dev/null
+++ b/xen/include/asm-arm/arm64/system.h
@@ -0,0 +1,29 @@
+/* Portions taken from Linux arch arm64 */
+#ifndef __ASM_ARM64_SYSTEM_H
+#define __ASM_ARM64_SYSTEM_H
+
+#define sev() asm volatile("sev" : : : "memory")
+#define wfe() asm volatile("wfe" : : : "memory")
+#define wfi() asm volatile("wfi" : : : "memory")
+
+#define isb() asm volatile("isb" : : : "memory")
+#define dsb() asm volatile("dsb sy" : : : "memory")
+#define dmb() asm volatile("dmb sy" : : : "memory")
+
+#define mb() dsb()
+#define rmb() dsb()
+#define wmb() mb()
+
+#define smp_mb() dmb()
+#define smp_rmb() dmb()
+#define smp_wmb() dmb()
+
+#endif
+/*
+ * Local variables:
+ * mode: C
+ * c-set-style: "BSD"
+ * c-basic-offset: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-arm/system.h b/xen/include/asm-arm/system.h
index 216ef1f..8b4c97a 100644
--- a/xen/include/asm-arm/system.h
+++ b/xen/include/asm-arm/system.h
@@ -11,18 +11,6 @@
#define xchg(ptr,x) \
((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
-#define isb() __asm__ __volatile__ ("isb" : : : "memory")
-#define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
-#define dmb() __asm__ __volatile__ ("dmb" : : : "memory")
-
-#define mb() dsb()
-#define rmb() dsb()
-#define wmb() mb()
-
-#define smp_mb() dmb()
-#define smp_rmb() dmb()
-#define smp_wmb() dmb()
-
/*
* This is used to ensure the compiler did actually allocate the register we
* asked it for some inline assembly sequences. Apparently we can't trust
@@ -33,6 +21,14 @@
*/
#define __asmeq(x, y) ".ifnc " x "," y " ; .err ; .endif\n\t"
+#if defined(CONFIG_ARM_32)
+# include <asm/arm32/system.h>
+#elif defined(CONFIG_ARM_64)
+# include <asm/arm64/system.h>
+#else
+# error "unknown ARM variant"
+#endif
+
extern void __bad_xchg(volatile void *, int);
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
--
1.7.2.5
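
For completeness, a hypothetical user of the new sev()/wfe() pair might look
like the sketch below; the work_pending flag and the function names are
invented for illustration, not taken from the patch. The dsb() before sev()
ensures the flag update is visible before the wake-up event is signalled.

/* Hypothetical wait/wake pattern built on the new primitives.
 * The waiter parks in wfe() until another CPU sets the flag and
 * issues sev(); dsb() orders the store ahead of the event. */
static volatile int work_pending;

static void wait_for_work(void)
{
    while ( !work_pending )
        wfe();              /* low-power wait for an event      */
}

static void post_work(void)
{
    work_pending = 1;       /* make work available ...          */
    dsb();                  /* ... ensure the store is visible  */
    sev();                  /* ... then wake any CPUs in wfe()  */
}
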
_______________________________________________
Xen-devel mailing list
Xen-devel@xxxxxxxxxxxxx
http://lists.xen.org/xen-devel