[Xen-devel] [PATCHv3 1/4] x86: provide xadd()
xadd() atomically adds a value and returns the previous value.  This
is needed to implement ticket locks.

Signed-off-by: David Vrabel <david.vrabel@xxxxxxxxxx>
---
 xen/include/asm-x86/system.h |   47 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 47 insertions(+)

diff --git a/xen/include/asm-x86/system.h b/xen/include/asm-x86/system.h
index 7111329..f244c8d 100644
--- a/xen/include/asm-x86/system.h
+++ b/xen/include/asm-x86/system.h
@@ -118,6 +118,53 @@ static always_inline unsigned long __cmpxchg(
 })
 
 /*
+ * Undefined symbol to cause link failure if a wrong size is used with
+ * xadd().
+ */
+extern unsigned long __bad_xadd_size(void);
+
+static always_inline unsigned long __xadd(
+    volatile void *ptr, unsigned long v, int size)
+{
+    switch ( size )
+    {
+    case 1:
+        asm volatile ( "lock; xaddb %b0,%1"
+                       : "+r" (v), "+m" (*__xg((volatile void *)ptr))
+                       :: "memory");
+        return v;
+    case 2:
+        asm volatile ( "lock; xaddw %w0,%1"
+                       : "+r" (v), "+m" (*__xg((volatile void *)ptr))
+                       :: "memory");
+        return v;
+    case 4:
+        asm volatile ( "lock; xaddl %k0,%1"
+                       : "+r" (v), "+m" (*__xg((volatile void *)ptr))
+                       :: "memory");
+        return v;
+    case 8:
+        asm volatile ( "lock; xaddq %q0,%1"
+                       : "+r" (v), "+m" (*__xg((volatile void *)ptr))
+                       :: "memory");
+
+        return v;
+    default:
+        return __bad_xadd_size();
+    }
+}
+
+/*
+ * Atomically add @v to the 1, 2, 4, or 8 byte value at @ptr.  Returns
+ * the previous value.
+ *
+ * This is a full memory barrier.
+ */
+#define xadd(ptr, v) ({                                                 \
+        __xadd((ptr), (unsigned long)(v), sizeof(*(ptr)));              \
+    })
+
+/*
  * Both Intel and AMD agree that, from a programmer's viewpoint:
  *  Loads cannot be reordered relative to other loads.
  *  Stores cannot be reordered relative to other stores.
-- 
1.7.10.4
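
For readers who want to see how a fetch-and-add primitive like xadd() ends
up being used, below is a minimal standalone sketch of a ticket lock built
on GCC's __atomic_fetch_add builtin instead of the xadd() macro above, so
it compiles outside the hypervisor.  The ticket_lock_t layout, the function
names, and the use of __builtin_ia32_pause() are illustrative assumptions
for an x86, little-endian target; this is not the actual Xen spinlock code
from the rest of the series.

#include <stdint.h>
#include <stdio.h>

/* Illustrative lock word: low 16 bits are the ticket now being served
 * (head), high 16 bits are the next ticket to hand out (tail).  The
 * union aliasing assumes a little-endian (x86) layout. */
typedef union {
    uint32_t head_tail;
    struct {
        uint16_t head;
        uint16_t tail;
    };
} ticket_lock_t;

static void ticket_lock(ticket_lock_t *lock)
{
    /* One atomic fetch-and-add on the whole word claims the next
     * ticket (tail) and observes the current head at the same time. */
    uint32_t old = __atomic_fetch_add(&lock->head_tail, 1u << 16,
                                      __ATOMIC_ACQUIRE);
    uint16_t my_ticket = (uint16_t)(old >> 16);

    /* Spin until the head counter reaches our ticket. */
    while ( __atomic_load_n(&lock->head, __ATOMIC_ACQUIRE) != my_ticket )
        __builtin_ia32_pause();      /* x86 PAUSE hint while spinning */
}

static void ticket_unlock(ticket_lock_t *lock)
{
    /* Advance only the head half; the tail (next ticket) is untouched. */
    __atomic_fetch_add(&lock->head, 1, __ATOMIC_RELEASE);
}

int main(void)
{
    ticket_lock_t lock = { .head_tail = 0 };

    ticket_lock(&lock);
    printf("held:     head_tail=%#x\n", (unsigned)lock.head_tail);
    ticket_unlock(&lock);
    printf("released: head_tail=%#x\n", (unsigned)lock.head_tail);
    return 0;
}

The single-word fetch-and-add in ticket_lock() is the key property: taking
a ticket and reading the current head happen in one atomic step, so waiters
are served strictly in FIFO order.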