[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]

[Xen-changelog] [xen-unstable] atomic: Define {read, write}_atomic() for reading/writing memory atomically.



# HG changeset patch
# User Keir Fraser <keir@xxxxxxx>
# Date 1322227918 0
# Node ID 67f70841e05853aba35bd97792f34a0569020639
# Parent  1027e7d13d02143048c7d48d7960967c5b1657a8
atomic: Define {read,write}_atomic() for reading/writing memory atomically.

Signed-off-by: Keir Fraser <keir@xxxxxxx>
---


diff -r 1027e7d13d02 -r 67f70841e058 xen/arch/x86/mm.c
--- a/xen/arch/x86/mm.c Sun Nov 20 18:26:16 2011 +0100
+++ b/xen/arch/x86/mm.c Fri Nov 25 13:31:58 2011 +0000
@@ -4629,7 +4629,7 @@
 
     /* All is good so make the update. */
     gdt_pent = map_domain_page(mfn);
-    atomic_write64((uint64_t *)&gdt_pent[offset], *(uint64_t *)&d);
+    write_atomic((uint64_t *)&gdt_pent[offset], *(uint64_t *)&d);
     unmap_domain_page(gdt_pent);
 
     put_page_type(page);
diff -r 1027e7d13d02 -r 67f70841e058 xen/arch/x86/mm/p2m-ept.c
--- a/xen/arch/x86/mm/p2m-ept.c Sun Nov 20 18:26:16 2011 +0100
+++ b/xen/arch/x86/mm/p2m-ept.c Fri Nov 25 13:31:58 2011 +0000
@@ -35,9 +35,9 @@
 #include "mm-locks.h"
 
 #define atomic_read_ept_entry(__pepte)                              \
-    ( (ept_entry_t) { .epte = atomic_read64(&(__pepte)->epte) } )
+    ( (ept_entry_t) { .epte = read_atomic(&(__pepte)->epte) } )
 #define atomic_write_ept_entry(__pepte, __epte)                     \
-    atomic_write64(&(__pepte)->epte, (__epte).epte)
+    write_atomic(&(__pepte)->epte, (__epte).epte)
 
 #define is_epte_present(ept_entry)      ((ept_entry)->epte & 0x7)
 #define is_epte_superpage(ept_entry)    ((ept_entry)->sp)
diff -r 1027e7d13d02 -r 67f70841e058 xen/arch/x86/x86_32/seg_fixup.c
--- a/xen/arch/x86/x86_32/seg_fixup.c   Sun Nov 20 18:26:16 2011 +0100
+++ b/xen/arch/x86/x86_32/seg_fixup.c   Fri Nov 25 13:31:58 2011 +0000
@@ -314,7 +314,7 @@
     b &= ~0xf0000; b |= limit & 0xf0000;
     b ^= _SEGMENT_EC; /* grows-up <-> grows-down */
     /* NB. This can't fault. Checked readable above; must also be writable. */
-    atomic_write64((uint64_t *)&table[2*idx], ((uint64_t)b<<32) | a);
+    write_atomic((uint64_t *)&table[2*idx], ((uint64_t)b<<32) | a);
     return 1;
 }
 
diff -r 1027e7d13d02 -r 67f70841e058 xen/common/timer.c
--- a/xen/common/timer.c        Sun Nov 20 18:26:16 2011 +0100
+++ b/xen/common/timer.c        Fri Nov 25 13:31:58 2011 +0000
@@ -239,7 +239,7 @@
 
     for ( ; ; )
     {
-        cpu = atomic_read16(&timer->cpu);
+        cpu = read_atomic(&timer->cpu);
         if ( unlikely(cpu == TIMER_CPU_status_killed) )
         {
             rcu_read_unlock(&timer_cpu_read_lock);
@@ -292,7 +292,7 @@
     memset(timer, 0, sizeof(*timer));
     timer->function = function;
     timer->data = data;
-    atomic_write16(&timer->cpu, cpu);
+    write_atomic(&timer->cpu, cpu);
     timer->status = TIMER_STATUS_inactive;
     if ( !timer_lock_irqsave(timer, flags) )
         BUG();
@@ -343,7 +343,7 @@
 
     for ( ; ; )
     {
-        old_cpu = atomic_read16(&timer->cpu);
+        old_cpu = read_atomic(&timer->cpu);
         if ( (old_cpu == new_cpu) || (old_cpu == TIMER_CPU_status_killed) )
         {
             rcu_read_unlock(&timer_cpu_read_lock);
@@ -375,7 +375,7 @@
         deactivate_timer(timer);
 
     list_del(&timer->inactive);
-    atomic_write16(&timer->cpu, new_cpu);
+    write_atomic(&timer->cpu, new_cpu);
     list_add(&timer->inactive, &per_cpu(timers, new_cpu).inactive);
 
     if ( active )
@@ -402,7 +402,7 @@
     list_del(&timer->inactive);
     timer->status = TIMER_STATUS_killed;
     old_cpu = timer->cpu;
-    atomic_write16(&timer->cpu, TIMER_CPU_status_killed);
+    write_atomic(&timer->cpu, TIMER_CPU_status_killed);
 
     spin_unlock_irqrestore(&per_cpu(timers, old_cpu).lock, flags);
 
@@ -573,7 +573,7 @@
              ? old_ts->heap[1] : old_ts->list) != NULL )
     {
         remove_entry(t);
-        atomic_write16(&t->cpu, new_cpu);
+        write_atomic(&t->cpu, new_cpu);
         notify |= add_entry(t);
     }
 
@@ -581,7 +581,7 @@
     {
         t = list_entry(old_ts->inactive.next, struct timer, inactive);
         list_del(&t->inactive);
-        atomic_write16(&t->cpu, new_cpu);
+        write_atomic(&t->cpu, new_cpu);
         list_add(&t->inactive, &new_ts->inactive);
     }
 
diff -r 1027e7d13d02 -r 67f70841e058 xen/include/asm-ia64/linux-xen/asm/atomic.h
--- a/xen/include/asm-ia64/linux-xen/asm/atomic.h       Sun Nov 20 18:26:16 2011 +0100
+++ b/xen/include/asm-ia64/linux-xen/asm/atomic.h       Fri Nov 25 13:31:58 2011 +0000
@@ -39,8 +39,8 @@
 #define ATOMIC_INIT(i)         { (i) }
 #define ATOMIC64_INIT(i)       { (i) }
 
-#define build_atomic_read(tag, type) \
-static inline type atomic_read##tag(const volatile type *addr) \
+#define build_read_atomic(tag, type) \
+static inline type read_##tag##_atomic(const volatile type *addr) \
 { \
        type ret; \
        asm volatile("ld%2.acq %0 = %1" \
@@ -49,37 +49,62 @@
        return ret; \
 }
 
-#define build_atomic_write(tag, type) \
-static inline void atomic_write##tag(volatile type *addr, type val) \
+#define build_write_atomic(tag, type) \
+static inline void write_##tag##_atomic(volatile type *addr, type val) \
 { \
        asm volatile("st%2.rel %0 = %1" \
                     : "=m" (*addr) \
                     : "r" (val), "i" (sizeof(type))); \
 }
 
-build_atomic_read(8, uint8_t)
-build_atomic_read(16, uint16_t)
-build_atomic_read(32, uint32_t)
-build_atomic_read(64, uint64_t)
-build_atomic_read(_int, int)
-build_atomic_read(_long, long)
+build_read_atomic(u8, uint8_t)
+build_read_atomic(u16, uint16_t)
+build_read_atomic(u32, uint32_t)
+build_read_atomic(u64, uint64_t)
 
-build_atomic_write(8, uint8_t)
-build_atomic_write(16, uint16_t)
-build_atomic_write(32, uint32_t)
-build_atomic_write(64, uint64_t)
-build_atomic_write(_int, int)
-build_atomic_write(_long, long)
+build_write_atomic(u8, uint8_t)
+build_write_atomic(u16, uint16_t)
+build_write_atomic(u32, uint32_t)
+build_write_atomic(u64, uint64_t)
+
+#undef build_read_atomic
+#undef build_write_atomic
+
+void __bad_atomic_size(void);
+
+#define read_atomic(p) ({                                               \
+    typeof(*p) __x;                                                     \
+    switch ( sizeof(*p) ) {                                             \
+    case 1: __x = (typeof(*p))read_u8_atomic((uint8_t *)p); break;      \
+    case 2: __x = (typeof(*p))read_u16_atomic((uint16_t *)p); break;    \
+    case 4: __x = (typeof(*p))read_u32_atomic((uint32_t *)p); break;    \
+    case 8: __x = (typeof(*p))read_u64_atomic((uint64_t *)p); break;    \
+    default: __x = 0; __bad_atomic_size(); break;                       \
+    }                                                                   \
+    __x;                                                                \
+})
+
+#define write_atomic(p, x) ({                                           \
+    typeof(*p) __x = (x);                                               \
+    switch ( sizeof(*p) ) {                                             \
+    case 1: write_u8_atomic((uint8_t *)p, (uint8_t)__x); break;         \
+    case 2: write_u16_atomic((uint16_t *)p, (uint16_t)__x); break;      \
+    case 4: write_u32_atomic((uint32_t *)p, (uint32_t)__x); break;      \
+    case 8: write_u64_atomic((uint64_t *)p, (uint64_t)__x); break;      \
+    default: __bad_atomic_size(); break;                                \
+    }                                                                   \
+    __x;                                                                \
+})
 
 #define _atomic_read(v)                ((v).counter)
 #define _atomic64_read(v)      ((v).counter)
-#define atomic_read(v)         atomic_read_int(&((v)->counter))
-#define atomic64_read(v)       atomic_read_long(&((v)->counter))
+#define atomic_read(v)         read_atomic(&((v)->counter))
+#define atomic64_read(v)       read_atomic(&((v)->counter))
 
 #define _atomic_set(v,i)       (((v).counter) = (i))
 #define _atomic64_set(v,i)     (((v).counter) = (i))
-#define atomic_set(v,i)                atomic_write_int(&((v)->counter), i)
-#define atomic64_set(v,l)      atomic_write_long(&((v)->counter), l)
+#define atomic_set(v,i)                write_atomic(&((v)->counter), i)
+#define atomic64_set(v,l)      write_atomic(&((v)->counter), l)
 
 #endif
 
diff -r 1027e7d13d02 -r 67f70841e058 xen/include/asm-x86/atomic.h
--- a/xen/include/asm-x86/atomic.h      Sun Nov 20 18:26:16 2011 +0100
+++ b/xen/include/asm-x86/atomic.h      Fri Nov 25 13:31:58 2011 +0000
@@ -4,36 +4,34 @@
 #include <xen/config.h>
 #include <asm/system.h>
 
-#define build_atomic_read(name, size, type, reg, barrier) \
+#define build_read_atomic(name, size, type, reg, barrier) \
 static inline type name(const volatile type *addr) \
 { type ret; asm volatile("mov" size " %1,%0":reg (ret) \
 :"m" (*(volatile type *)addr) barrier); return ret; }
 
-#define build_atomic_write(name, size, type, reg, barrier) \
+#define build_write_atomic(name, size, type, reg, barrier) \
 static inline void name(volatile type *addr, type val) \
 { asm volatile("mov" size " %1,%0": "=m" (*(volatile type *)addr) \
 :reg (val) barrier); }
 
-build_atomic_read(atomic_read8, "b", uint8_t, "=q", )
-build_atomic_read(atomic_read16, "w", uint16_t, "=r", )
-build_atomic_read(atomic_read32, "l", uint32_t, "=r", )
-build_atomic_read(atomic_read_int, "l", int, "=r", )
+build_read_atomic(read_u8_atomic, "b", uint8_t, "=q", )
+build_read_atomic(read_u16_atomic, "w", uint16_t, "=r", )
+build_read_atomic(read_u32_atomic, "l", uint32_t, "=r", )
 
-build_atomic_write(atomic_write8, "b", uint8_t, "q", )
-build_atomic_write(atomic_write16, "w", uint16_t, "r", )
-build_atomic_write(atomic_write32, "l", uint32_t, "r", )
-build_atomic_write(atomic_write_int, "l", int, "r", )
+build_write_atomic(write_u8_atomic, "b", uint8_t, "q", )
+build_write_atomic(write_u16_atomic, "w", uint16_t, "r", )
+build_write_atomic(write_u32_atomic, "l", uint32_t, "r", )
 
 #ifdef __x86_64__
-build_atomic_read(atomic_read64, "q", uint64_t, "=r", )
-build_atomic_write(atomic_write64, "q", uint64_t, "r", )
+build_read_atomic(read_u64_atomic, "q", uint64_t, "=r", )
+build_write_atomic(write_u64_atomic, "q", uint64_t, "r", )
 #else
-static inline uint64_t atomic_read64(const volatile uint64_t *addr)
+static inline uint64_t read_u64_atomic(const volatile uint64_t *addr)
 {
     uint64_t *__addr = (uint64_t *)addr;
     return __cmpxchg8b(__addr, 0, 0);
 }
-static inline void atomic_write64(volatile uint64_t *addr, uint64_t val)
+static inline void write_u64_atomic(volatile uint64_t *addr, uint64_t val)
 {
     uint64_t old = *addr, new, *__addr = (uint64_t *)addr;
     while ( (new = __cmpxchg8b(__addr, old, val)) != old )
@@ -41,8 +39,34 @@
 }
 #endif
 
-#undef build_atomic_read
-#undef build_atomic_write
+#undef build_read_atomic
+#undef build_write_atomic
+
+void __bad_atomic_size(void);
+
+#define read_atomic(p) ({                                               \
+    typeof(*p) __x;                                                     \
+    switch ( sizeof(*p) ) {                                             \
+    case 1: __x = (typeof(*p))read_u8_atomic((uint8_t *)p); break;      \
+    case 2: __x = (typeof(*p))read_u16_atomic((uint16_t *)p); break;    \
+    case 4: __x = (typeof(*p))read_u32_atomic((uint32_t *)p); break;    \
+    case 8: __x = (typeof(*p))read_u64_atomic((uint64_t *)p); break;    \
+    default: __x = 0; __bad_atomic_size(); break;                       \
+    }                                                                   \
+    __x;                                                                \
+})
+
+#define write_atomic(p, x) ({                                           \
+    typeof(*p) __x = (x);                                               \
+    switch ( sizeof(*p) ) {                                             \
+    case 1: write_u8_atomic((uint8_t *)p, (uint8_t)__x); break;         \
+    case 2: write_u16_atomic((uint16_t *)p, (uint16_t)__x); break;      \
+    case 4: write_u32_atomic((uint32_t *)p, (uint32_t)__x); break;      \
+    case 8: write_u64_atomic((uint64_t *)p, (uint64_t)__x); break;      \
+    default: __bad_atomic_size(); break;                                \
+    }                                                                   \
+    __x;                                                                \
+})
 
 /*
  * NB. I've pushed the volatile qualifier into the operations. This allows
@@ -60,7 +84,7 @@
  * Atomically reads the value of @v.
  */
 #define _atomic_read(v)  ((v).counter)
-#define atomic_read(v)   atomic_read_int(&((v)->counter))
+#define atomic_read(v)   read_atomic(&((v)->counter))
 
 /**
  * atomic_set - set atomic variable
@@ -70,7 +94,7 @@
  * Atomically sets the value of @v to @i.
  */ 
 #define _atomic_set(v,i) (((v).counter) = (i))
-#define atomic_set(v,i)  atomic_write_int(&((v)->counter), (i))
+#define atomic_set(v,i)  write_atomic(&((v)->counter), (i))
 
 /**
  * atomic_add - add integer to atomic variable
diff -r 1027e7d13d02 -r 67f70841e058 xen/include/asm-x86/x86_32/page.h
--- a/xen/include/asm-x86/x86_32/page.h Sun Nov 20 18:26:16 2011 +0100
+++ b/xen/include/asm-x86/x86_32/page.h Fri Nov 25 13:31:58 2011 +0000
@@ -85,15 +85,15 @@
 
 #endif
 
-#define pte_read_atomic(ptep)       atomic_read64(ptep)
-#define pte_write_atomic(ptep, pte) atomic_write64(ptep, pte)
-#define pte_write(ptep, pte) do {                             \
-    u32 *__ptep_words = (u32 *)(ptep);                        \
-    atomic_write32(&__ptep_words[0], 0);                      \
-    wmb();                                                    \
-    atomic_write32(&__ptep_words[1], (pte) >> 32);            \
-    wmb();                                                    \
-    atomic_write32(&__ptep_words[0], (pte) >>  0);            \
+#define pte_read_atomic(ptep)       read_atomic(ptep)
+#define pte_write_atomic(ptep, pte) write_atomic(ptep, pte)
+#define pte_write(ptep, pte) do {                   \
+    u32 *__ptep_words = (u32 *)(ptep);              \
+    write_atomic(&__ptep_words[0], 0);              \
+    wmb();                                          \
+    write_atomic(&__ptep_words[1], (pte) >> 32);    \
+    wmb();                                          \
+    write_atomic(&__ptep_words[0], (pte) >>  0);    \
 } while ( 0 )
 
 /* root table */
diff -r 1027e7d13d02 -r 67f70841e058 xen/include/asm-x86/x86_64/page.h
--- a/xen/include/asm-x86/x86_64/page.h Sun Nov 20 18:26:16 2011 +0100
+++ b/xen/include/asm-x86/x86_64/page.h Fri Nov 25 13:31:58 2011 +0000
@@ -116,9 +116,9 @@
 
 #endif /* !__ASSEMBLY__ */
 
-#define pte_read_atomic(ptep)       atomic_read64(ptep)
-#define pte_write_atomic(ptep, pte) atomic_write64(ptep, pte)
-#define pte_write(ptep, pte)        atomic_write64(ptep, pte)
+#define pte_read_atomic(ptep)       read_atomic(ptep)
+#define pte_write_atomic(ptep, pte) write_atomic(ptep, pte)
+#define pte_write(ptep, pte)        write_atomic(ptep, pte)
 
 /* Given a virtual address, get an entry offset into a linear page table. */
 #define l1_linear_offset(_a) (((_a) & VADDR_MASK) >> L1_PAGETABLE_SHIFT)

_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog


 


Rackspace

Lists.xenproject.org is hosted with RackSpace, monitoring our
servers 24x7x365 and backed by RackSpace's Fanatical Support®.