[xen master] xen: Introduce cmpxchg64() and guest_cmpxchg64()
commit c729d54331ffb05abe93ed2349377092f134fdbd
Author: Julien Grall <jgrall@xxxxxxxxxx>
AuthorDate: Fri Sep 11 17:06:22 2020 +0100
Commit: Julien Grall <jgrall@xxxxxxxxxx>
CommitDate: Fri Sep 18 17:33:01 2020 +0100
xen: Introduce cmpxchg64() and guest_cmpxchg64()
The IOREQ code is using cmpxchg() with a 64-bit value. At the moment, this
is x86 code, but there is a plan to make it common.
To cater for 32-bit architectures, introduce two new helpers to deal with
64-bit cmpxchg().
The Arm 32-bit implementation of cmpxchg64() is based on __cmpxchg64() in
Linux v5.8 (arch/arm/include/asm/cmpxchg.h).
Note that only guest_cmpxchg64() is introduced on x86 so far.
Signed-off-by: Julien Grall <jgrall@xxxxxxxxxx>
Reviewed-by: Stefano Stabellini <sstabellini@xxxxxxxxxx>
Acked-by: Jan Beulich <jbeulich@xxxxxxxx>
---
xen/include/asm-arm/arm32/cmpxchg.h | 68 +++++++++++++++++++++++++++++++++++++
xen/include/asm-arm/arm64/cmpxchg.h | 5 +++
xen/include/asm-arm/guest_atomics.h | 22 ++++++++++++
xen/include/asm-x86/guest_atomics.h | 1 +
4 files changed, 96 insertions(+)
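For illustration only, here is a minimal sketch of how a caller could use the
new guest_cmpxchg64() helper once the IOREQ code is made common. The structure,
the function name try_transition() and the header choices below are hypothetical
and not part of this patch.

#include <xen/sched.h>            /* struct domain */
#include <asm/guest_atomics.h>    /* guest_cmpxchg64() */

/* Hypothetical record containing a 64-bit field shared with a guest. */
struct shared_rec {
    volatile uint64_t state;
};

/*
 * Atomically move rec->state from 'expected' to 'desired' on behalf of
 * domain 'd'.  Returns true when the transition happened; on failure
 * '*observed' holds the value actually found in memory.
 */
static bool try_transition(struct domain *d, struct shared_rec *rec,
                           uint64_t expected, uint64_t desired,
                           uint64_t *observed)
{
    *observed = guest_cmpxchg64(d, &rec->state, expected, desired);

    return *observed == expected;
}

As the Arm implementation below shows, guest_cmpxchg64() bounds the number of
exclusive-access attempts and, on timeout, pauses the domain before retrying
with a plain cmpxchg64(), so a guest repeatedly dirtying the cache line cannot
livelock the hypervisor.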
diff --git a/xen/include/asm-arm/arm32/cmpxchg.h b/xen/include/asm-arm/arm32/cmpxchg.h
index 3ef1e5c632..b0bd1d8b68 100644
--- a/xen/include/asm-arm/arm32/cmpxchg.h
+++ b/xen/include/asm-arm/arm32/cmpxchg.h
@@ -87,6 +87,37 @@ __CMPXCHG_CASE(b, 1)
__CMPXCHG_CASE(h, 2)
__CMPXCHG_CASE( , 4)
+static inline bool __cmpxchg_case_8(volatile uint64_t *ptr,
+ uint64_t *old,
+ uint64_t new,
+ bool timeout,
+ unsigned int max_try)
+{
+ uint64_t oldval;
+ uint64_t res;
+
+ do {
+ asm volatile(
+ " ldrexd %1, %H1, [%3]\n"
+ " teq %1, %4\n"
+ " teqeq %H1, %H4\n"
+ " movne %0, #0\n"
+ " movne %H0, #0\n"
+ " bne 2f\n"
+ " strexd %0, %5, %H5, [%3]\n"
+ "2:"
+ : "=&r" (res), "=&r" (oldval), "+Qo" (*ptr)
+ : "r" (ptr), "r" (*old), "r" (new)
+ : "memory", "cc");
+ if (!res)
+ break;
+ } while (!timeout || ((--max_try) > 0));
+
+ *old = oldval;
+
+ return !res;
+}
+
static always_inline bool __int_cmpxchg(volatile void *ptr, unsigned long *old,
unsigned long new, int size,
bool timeout, unsigned int max_try)
@@ -145,11 +176,48 @@ static always_inline bool __cmpxchg_timeout(volatile void *ptr,
return ret;
}
+/*
+ * The helper may fail to update the memory if the action takes too long.
+ *
+ * @old: On call the value pointed contains the expected old value. It will be
+ * updated to the actual old value.
+ * @max_try: Maximum number of iterations
+ *
+ * The helper will return true when the update has succeeded (i.e no
+ * timeout) and false if the update has failed.
+ */
+static always_inline bool __cmpxchg64_timeout(volatile uint64_t *ptr,
+ uint64_t *old,
+ uint64_t new,
+ unsigned int max_try)
+{
+ bool ret;
+
+ smp_mb();
+ ret = __cmpxchg_case_8(ptr, old, new, true, max_try);
+ smp_mb();
+
+ return ret;
+}
+
#define cmpxchg(ptr,o,n) \
((__typeof__(*(ptr)))__cmpxchg((ptr), \
(unsigned long)(o), \
(unsigned long)(n), \
sizeof(*(ptr))))
+
+static inline uint64_t cmpxchg64(volatile uint64_t *ptr,
+ uint64_t old,
+ uint64_t new)
+{
+ smp_mb();
+ if (!__cmpxchg_case_8(ptr, &old, new, false, 0))
+ ASSERT_UNREACHABLE();
+ smp_mb();
+
+ return old;
+}
+
#endif
/*
* Local variables:
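A hedged usage sketch (not part of the patch) of the plain cmpxchg64()
introduced above: a typical retry loop that atomically sets a flag bit in a
64-bit word. The helper name set_flag64() and the header choice are assumptions
for illustration; the same pattern works unchanged on arm64, where cmpxchg64()
is simply aliased to cmpxchg() (see below).

#include <xen/types.h>
#include <asm/system.h>   /* assumed to pull in the per-arch cmpxchg helpers */

/* Atomically OR 'flag' into the 64-bit word at 'word'. */
static void set_flag64(volatile uint64_t *word, uint64_t flag)
{
    uint64_t old, cur = *word;

    do {
        old = cur;
        /* cmpxchg64() returns the value found in memory; retry on a race. */
        cur = cmpxchg64(word, old, old | flag);
    } while ( cur != old );
}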
diff --git a/xen/include/asm-arm/arm64/cmpxchg.h b/xen/include/asm-arm/arm64/cmpxchg.h
index f4a8c1ccba..10e4edc022 100644
--- a/xen/include/asm-arm/arm64/cmpxchg.h
+++ b/xen/include/asm-arm/arm64/cmpxchg.h
@@ -167,6 +167,11 @@ static always_inline bool __cmpxchg_timeout(volatile void *ptr,
__ret; \
})
+#define cmpxchg64(ptr, o, n) cmpxchg(ptr, o, n)
+
+#define __cmpxchg64_timeout(ptr, old, new, max_try) \
+ __cmpxchg_timeout(ptr, old, new, 8, max_try)
+
#endif
/*
* Local variables:
diff --git a/xen/include/asm-arm/guest_atomics.h b/xen/include/asm-arm/guest_atomics.h
index 20347849e5..9e2e96d4ff 100644
--- a/xen/include/asm-arm/guest_atomics.h
+++ b/xen/include/asm-arm/guest_atomics.h
@@ -115,6 +115,28 @@ static inline unsigned long __guest_cmpxchg(struct domain *d,
(unsigned long)(n),\
sizeof (*(ptr))))
+static inline uint64_t guest_cmpxchg64(struct domain *d,
+ volatile uint64_t *ptr,
+ uint64_t old,
+ uint64_t new)
+{
+ uint64_t oldval = old;
+
+ perfc_incr(atomics_guest);
+
+ if ( __cmpxchg64_timeout(ptr, &oldval, new,
+ this_cpu(guest_safe_atomic_max)) )
+ return oldval;
+
+ perfc_incr(atomics_guest_paused);
+
+ domain_pause_nosync(d);
+ oldval = cmpxchg64(ptr, old, new);
+ domain_unpause(d);
+
+ return oldval;
+}
+
#endif /* _ARM_GUEST_ATOMICS_H */
/*
* Local variables:
diff --git a/xen/include/asm-x86/guest_atomics.h b/xen/include/asm-x86/guest_atomics.h
index 029417c8ff..c2dec0d650 100644
--- a/xen/include/asm-x86/guest_atomics.h
+++ b/xen/include/asm-x86/guest_atomics.h
@@ -20,6 +20,7 @@
((void)(d), test_and_change_bit(nr, p))
#define guest_cmpxchg(d, ptr, o, n) ((void)(d), cmpxchg(ptr, o, n))
+#define guest_cmpxchg64 guest_cmpxchg
#endif /* _X86_GUEST_ATOMICS_H */
/*
--
generated by git-patchbot for /home/xen/git/xen.git#master