[Xen-changelog] [xen master] Revert "use ticket locks for spin locks"
commit 818e376d3b17845d39735517650224c64c9e0078
Author:     Jan Beulich <jbeulich@xxxxxxxx>
AuthorDate: Thu May 28 12:07:33 2015 +0200
Commit:     Jan Beulich <jbeulich@xxxxxxxx>
CommitDate: Thu May 28 12:07:33 2015 +0200

    Revert "use ticket locks for spin locks"

    This reverts commit 45fcc4568c5162b00fb3907fb158af82dd484a3d
    as it introduces yet to be explained issues on ARM.
---
 xen/common/spinlock.c        |  116 ++++++++++++++++++------------------------
 xen/include/asm-arm/system.h |    3 -
 xen/include/asm-x86/system.h |   11 ----
 xen/include/xen/spinlock.h   |   16 ++----
 4 files changed, 54 insertions(+), 92 deletions(-)

diff --git a/xen/common/spinlock.c b/xen/common/spinlock.c
index c8dc8ba..5fd8b1c 100644
--- a/xen/common/spinlock.c
+++ b/xen/common/spinlock.c
@@ -115,134 +115,125 @@ void spin_debug_disable(void)
 
 #endif
 
-static always_inline spinlock_tickets_t observe_lock(spinlock_tickets_t *t)
-{
-    spinlock_tickets_t v;
-
-    smp_rmb();
-    v.head_tail = read_atomic(&t->head_tail);
-    return v;
-}
-
-static always_inline u16 observe_head(spinlock_tickets_t *t)
-{
-    smp_rmb();
-    return read_atomic(&t->head);
-}
-
 void _spin_lock(spinlock_t *lock)
 {
-    spinlock_tickets_t tickets = { .tail = 1, };
     LOCK_PROFILE_VAR;
 
     check_lock(&lock->debug);
-    tickets.head_tail = arch_fetch_and_add(&lock->tickets.head_tail,
-                                           tickets.head_tail);
-    while ( tickets.tail != observe_head(&lock->tickets) )
+    while ( unlikely(!_raw_spin_trylock(&lock->raw)) )
     {
         LOCK_PROFILE_BLOCK;
-        cpu_relax();
+        while ( likely(_raw_spin_is_locked(&lock->raw)) )
+            cpu_relax();
     }
     LOCK_PROFILE_GOT;
     preempt_disable();
-    arch_lock_acquire_barrier();
 }
 
 void _spin_lock_irq(spinlock_t *lock)
 {
+    LOCK_PROFILE_VAR;
+
     ASSERT(local_irq_is_enabled());
     local_irq_disable();
-    _spin_lock(lock);
+    check_lock(&lock->debug);
+    while ( unlikely(!_raw_spin_trylock(&lock->raw)) )
+    {
+        LOCK_PROFILE_BLOCK;
+        local_irq_enable();
+        while ( likely(_raw_spin_is_locked(&lock->raw)) )
+            cpu_relax();
+        local_irq_disable();
+    }
+    LOCK_PROFILE_GOT;
+    preempt_disable();
 }
 
 unsigned long _spin_lock_irqsave(spinlock_t *lock)
 {
     unsigned long flags;
+    LOCK_PROFILE_VAR;
 
     local_irq_save(flags);
-    _spin_lock(lock);
+    check_lock(&lock->debug);
+    while ( unlikely(!_raw_spin_trylock(&lock->raw)) )
+    {
+        LOCK_PROFILE_BLOCK;
+        local_irq_restore(flags);
+        while ( likely(_raw_spin_is_locked(&lock->raw)) )
+            cpu_relax();
+        local_irq_disable();
+    }
+    LOCK_PROFILE_GOT;
+    preempt_disable();
     return flags;
 }
 
 void _spin_unlock(spinlock_t *lock)
 {
-    arch_lock_release_barrier();
     preempt_enable();
     LOCK_PROFILE_REL;
-    add_sized(&lock->tickets.head, 1);
+    _raw_spin_unlock(&lock->raw);
 }
 
 void _spin_unlock_irq(spinlock_t *lock)
 {
-    _spin_unlock(lock);
+    preempt_enable();
+    LOCK_PROFILE_REL;
+    _raw_spin_unlock(&lock->raw);
     local_irq_enable();
 }
 
 void _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
 {
-    _spin_unlock(lock);
+    preempt_enable();
+    LOCK_PROFILE_REL;
+    _raw_spin_unlock(&lock->raw);
     local_irq_restore(flags);
 }
 
 int _spin_is_locked(spinlock_t *lock)
 {
     check_lock(&lock->debug);
-    return lock->tickets.head != lock->tickets.tail;
+    return _raw_spin_is_locked(&lock->raw);
 }
 
 int _spin_trylock(spinlock_t *lock)
 {
-    spinlock_tickets_t old, new;
-
     check_lock(&lock->debug);
-    old = observe_lock(&lock->tickets);
-    if ( old.head != old.tail )
-        return 0;
-    new = old;
-    new.tail++;
-    if ( cmpxchg(&lock->tickets.head_tail,
-                 old.head_tail, new.head_tail) != old.head_tail )
+    if ( !_raw_spin_trylock(&lock->raw) )
         return 0;
 #ifdef LOCK_PROFILE
     if (lock->profile)
         lock->profile->time_locked = NOW();
 #endif
     preempt_disable();
-    /*
-     * cmpxchg() is a full barrier so no need for an
-     * arch_lock_acquire_barrier().
-     */
     return 1;
 }
 
 void _spin_barrier(spinlock_t *lock)
 {
-    spinlock_tickets_t sample;
 #ifdef LOCK_PROFILE
     s_time_t block = NOW();
-#endif
+    u64      loop = 0;
 
     check_barrier(&lock->debug);
-    smp_mb();
-    sample = observe_lock(&lock->tickets);
-    if ( sample.head != sample.tail )
+    do { smp_mb(); loop++;} while ( _raw_spin_is_locked(&lock->raw) );
+    if ((loop > 1) && lock->profile)
     {
-        while ( observe_head(&lock->tickets) == sample.head )
-            cpu_relax();
-#ifdef LOCK_PROFILE
-        if ( lock->profile )
-        {
-            lock->profile->time_block += NOW() - block;
-            lock->profile->block_cnt++;
-        }
-#endif
+        lock->profile->time_block += NOW() - block;
+        lock->profile->block_cnt++;
     }
+#else
+    check_barrier(&lock->debug);
+    do { smp_mb(); } while ( _raw_spin_is_locked(&lock->raw) );
+#endif
     smp_mb();
 }
 
 int _spin_trylock_recursive(spinlock_t *lock)
 {
-    unsigned int cpu = smp_processor_id();
+    int cpu = smp_processor_id();
 
     /* Don't allow overflow of recurse_cpu field. */
     BUILD_BUG_ON(NR_CPUS > 0xfffu);
@@ -265,17 +256,8 @@ int _spin_trylock_recursive(spinlock_t *lock)
 
 void _spin_lock_recursive(spinlock_t *lock)
 {
-    unsigned int cpu = smp_processor_id();
-
-    if ( likely(lock->recurse_cpu != cpu) )
-    {
-        _spin_lock(lock);
-        lock->recurse_cpu = cpu;
-    }
-
-    /* We support only fairly shallow recursion, else the counter overflows. */
-    ASSERT(lock->recurse_cnt < 0xfu);
-    lock->recurse_cnt++;
+    while ( !spin_trylock_recursive(lock) )
+        cpu_relax();
 }
 
 void _spin_unlock_recursive(spinlock_t *lock)
diff --git a/xen/include/asm-arm/system.h b/xen/include/asm-arm/system.h
index f0e222f..2eb96e8 100644
--- a/xen/include/asm-arm/system.h
+++ b/xen/include/asm-arm/system.h
@@ -53,9 +53,6 @@
 
 #define arch_fetch_and_add(x, v) __sync_fetch_and_add(x, v)
 
-#define arch_lock_acquire_barrier() smp_mb()
-#define arch_lock_release_barrier() smp_mb()
-
 extern struct vcpu *__context_switch(struct vcpu *prev, struct vcpu *next);
 
 #endif
diff --git a/xen/include/asm-x86/system.h b/xen/include/asm-x86/system.h
index 25a6a2a..9fb70f5 100644
--- a/xen/include/asm-x86/system.h
+++ b/xen/include/asm-x86/system.h
@@ -185,17 +185,6 @@ static always_inline unsigned long __xadd(
 #define set_mb(var, value) do { xchg(&var, value); } while (0)
 #define set_wmb(var, value) do { var = value; wmb(); } while (0)
 
-/*
- * On x86 the only reordering is of reads with older writes. In the
- * lock case, the read in observe_head() can only be reordered with
- * writes that precede it, and moving a write _into_ a locked section
- * is OK. In the release case, the write in add_sized() can only be
- * reordered with reads that follow it, and hoisting a read _into_ a
- * locked region is OK.
- */
-#define arch_lock_acquire_barrier() barrier()
-#define arch_lock_release_barrier() barrier()
-
 #define local_irq_disable()     asm volatile ( "cli" : : : "memory" )
 #define local_irq_enable()      asm volatile ( "sti" : : : "memory" )
 
diff --git a/xen/include/xen/spinlock.h b/xen/include/xen/spinlock.h
index bafbc74..eda9b2e 100644
--- a/xen/include/xen/spinlock.h
+++ b/xen/include/xen/spinlock.h
@@ -80,7 +80,8 @@ struct lock_profile_qhead {
     static struct lock_profile *__lock_profile_##name                        \
     __used_section(".lockprofile.data") =                                    \
     &__lock_profile_data_##name
-#define _SPIN_LOCK_UNLOCKED(x) { { 0 }, 0xfffu, 0, _LOCK_DEBUG, x }
+#define _SPIN_LOCK_UNLOCKED(x) { _RAW_SPIN_LOCK_UNLOCKED, 0xfffu, 0,          \
+                                 _LOCK_DEBUG, x }
 #define SPIN_LOCK_UNLOCKED _SPIN_LOCK_UNLOCKED(NULL)
 #define DEFINE_SPINLOCK(l)                                                    \
     spinlock_t l = _SPIN_LOCK_UNLOCKED(NULL);                                 \
@@ -116,7 +117,8 @@ extern void spinlock_profile_reset(unsigned char key);
 
 struct lock_profile_qhead { };
 
-#define SPIN_LOCK_UNLOCKED { { 0 }, 0xfffu, 0, _LOCK_DEBUG }
+#define SPIN_LOCK_UNLOCKED                                                    \
+    { _RAW_SPIN_LOCK_UNLOCKED, 0xfffu, 0, _LOCK_DEBUG }
 #define DEFINE_SPINLOCK(l) spinlock_t l = SPIN_LOCK_UNLOCKED
 
 #define spin_lock_init_prof(s, l) spin_lock_init(&((s)->l))
@@ -125,16 +127,8 @@ struct lock_profile_qhead { };
 
 #endif
 
-typedef union {
-    u32 head_tail;
-    struct {
-        u16 head;
-        u16 tail;
-    };
-} spinlock_tickets_t;
-
 typedef struct spinlock {
-    spinlock_tickets_t tickets;
+    raw_spinlock_t raw;
     u16 recurse_cpu:12;
     u16 recurse_cnt:4;
     struct lock_debug debug;
--
generated by git-patchbot for /home/xen/git/xen.git#master
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog
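
For readers not familiar with the mechanism being reverted: a ticket lock hands every would-be locker a sequence number (the tail counter) and admits lockers strictly in that order as the head counter advances, giving FIFO fairness that the trylock-based spinning restored above does not guarantee. The following stand-alone sketch only illustrates the idea; it is not Xen code, it assumes a little-endian target, and it uses GCC's __atomic builtins in place of Xen's arch_fetch_and_add()/add_sized()/cpu_relax() helpers.

    /* Illustrative ticket-lock sketch (not part of the patch above). */
    #include <stdint.h>

    typedef union {
        uint32_t head_tail;
        struct {
            uint16_t head;  /* ticket currently being served (low half, LE) */
            uint16_t tail;  /* next ticket to hand out (high half, LE) */
        };
    } ticket_lock_t;

    static void ticket_lock(ticket_lock_t *lock)
    {
        /* Take a ticket: atomically add 1 to .tail via the combined word. */
        ticket_lock_t me;
        me.head_tail = __atomic_fetch_add(&lock->head_tail, 1u << 16,
                                          __ATOMIC_ACQUIRE);
        /* Spin until our ticket is the one being served. */
        while (__atomic_load_n(&lock->head, __ATOMIC_ACQUIRE) != me.tail)
            ;  /* a real implementation would insert cpu_relax()/PAUSE here */
    }

    static void ticket_unlock(ticket_lock_t *lock)
    {
        /* Serve the next waiter. */
        __atomic_fetch_add(&lock->head, 1, __ATOMIC_RELEASE);
    }

    int main(void)
    {
        ticket_lock_t l = { .head_tail = 0 };

        ticket_lock(&l);
        /* critical section */
        ticket_unlock(&l);
        return 0;
    }

The reverted Xen implementation followed the same pattern, with arch_lock_acquire_barrier()/arch_lock_release_barrier() supplying the ordering that the __ATOMIC_ACQUIRE/__ATOMIC_RELEASE arguments provide in this sketch.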