[PATCH v5 1/7] xen/riscv: use {read,write}{b,w,l,q}_cpu() to define {read,write}_atomic()
Judging by the {read,write}_atomic() implementations of the other
architectures, these accessors do not need to be memory-ordered in Xen.
Therefore {read,write}{b,w,l,q}_cpu() can be used instead of
{read,write}{b,w,l,q}(), leaving it to the caller to decide whether
additional fences are needed before or after {read,write}_atomic().
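
For illustration only (not part of this patch): with the relaxed
accessors, a caller that does need ordering adds the fences itself.
A minimal sketch, assuming Xen's usual smp_wmb()/smp_rmb() barriers
(header locations assumed) and a made-up 'shared_flag' variable:

    #include <xen/types.h>      /* bool */
    #include <asm/atomic.h>     /* read_atomic(), write_atomic() */
    #include <asm/system.h>     /* smp_wmb(), smp_rmb() (assumed location) */

    static unsigned int shared_flag;   /* made-up example variable */

    /* Publisher: order earlier stores before the relaxed flag update. */
    static void publish(void)
    {
        smp_wmb();
        write_atomic(&shared_flag, 1);
    }

    /* Consumer: order later loads after the relaxed flag read. */
    static bool consume(void)
    {
        if ( !read_atomic(&shared_flag) )
            return false;
        smp_rmb();
        return true;
    }

The point is only that any required ordering becomes explicit at the
call site instead of being baked into every access.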
Change the declaration of _write_atomic() to accept a 'volatile void *'
type for the 'x' argument instead of 'unsigned long'.
This prevents compilation errors such as:
1."discards 'volatile' qualifier from pointer target type," which occurs
due to the initialization of a volatile pointer,
e.g., `volatile uint8_t *ptr = p;` in _add_sized().
2."incompatible type for argument 2 of '_write_atomic'," which can occur
when calling write_pte(), where 'x' is of type pte_t rather than
unsigned long.
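
As a sketch of the second case (hypothetical and simplified, not taken
from this series): with 'x' passed by address, an aggregate type such
as pte_t can be handed straight to write_atomic():

    #include <asm/atomic.h>   /* write_atomic() */
    #include <asm/page.h>     /* pte_t (assumed: struct wrapping a uint64_t) */

    /* Simplified write_pte(): sizeof(pte_t) == 8 selects writeq_cpu(). */
    static inline void write_pte(pte_t *p, pte_t pte)
    {
        write_atomic(p, pte);   /* x_ is a pte_t; its address is passed on */
    }

With the previous 'unsigned long x' prototype this call would have
needed a cast or a raw-value accessor; taking the address sidesteps
that.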
Signed-off-by: Oleksii Kurochko <oleksii.kurochko@xxxxxxxxx>
---
Changes in v5:
- new patch.
---
xen/arch/riscv/include/asm/atomic.h | 25 +++++++++++--------------
1 file changed, 11 insertions(+), 14 deletions(-)
diff --git a/xen/arch/riscv/include/asm/atomic.h b/xen/arch/riscv/include/asm/atomic.h
index 31b91a79c8..446c8c7928 100644
--- a/xen/arch/riscv/include/asm/atomic.h
+++ b/xen/arch/riscv/include/asm/atomic.h
@@ -31,21 +31,17 @@
 void __bad_atomic_size(void);
-/*
- * Legacy from Linux kernel. For some reason they wanted to have ordered
- * read/write access. Thereby read* is used instead of read*_cpu()
- */
 static always_inline void read_atomic_size(const volatile void *p,
                                            void *res,
                                            unsigned int size)
 {
     switch ( size )
     {
-    case 1: *(uint8_t *)res = readb(p); break;
-    case 2: *(uint16_t *)res = readw(p); break;
-    case 4: *(uint32_t *)res = readl(p); break;
+    case 1: *(uint8_t *)res = readb_cpu(p); break;
+    case 2: *(uint16_t *)res = readw_cpu(p); break;
+    case 4: *(uint32_t *)res = readl_cpu(p); break;
 #ifndef CONFIG_RISCV_32
-    case 8: *(uint32_t *)res = readq(p); break;
+    case 8: *(uint64_t *)res = readq_cpu(p); break;
 #endif
     default: __bad_atomic_size(); break;
     }
@@ -58,15 +54,16 @@ static always_inline void read_atomic_size(const volatile void *p,
 })
 static always_inline void _write_atomic(volatile void *p,
-                                        unsigned long x, unsigned int size)
+                                        volatile void *x,
+                                        unsigned int size)
 {
     switch ( size )
     {
-    case 1: writeb(x, p); break;
-    case 2: writew(x, p); break;
-    case 4: writel(x, p); break;
+    case 1: writeb_cpu(*(uint8_t *)x, p); break;
+    case 2: writew_cpu(*(uint16_t *)x, p); break;
+    case 4: writel_cpu(*(uint32_t *)x, p); break;
 #ifndef CONFIG_RISCV_32
-    case 8: writeq(x, p); break;
+    case 8: writeq_cpu(*(uint64_t *)x, p); break;
 #endif
     default: __bad_atomic_size(); break;
     }
@@ -75,7 +72,7 @@ static always_inline void _write_atomic(volatile void *p,
 #define write_atomic(p, x)                       \
 ({                                               \
     typeof(*(p)) x_ = (x);                       \
-    _write_atomic(p, x_, sizeof(*(p)));          \
+    _write_atomic(p, &x_, sizeof(*(p)));         \
 })
 static always_inline void _add_sized(volatile void *p,
--
2.46.0