
[Xen-devel] [PATCH v11 08/16] qspinlock: Prepare for unfair lock support



If unfair locking is supported, the lock acquisition loop at the end of
the queue_spin_lock_slowpath() function may need to detect that the
lock has been stolen. This patch adds the code for that stolen-lock
detection.
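
This patch itself does not change behaviour: the default
try_set_locked() below still unconditionally claims the lock and
returns 1. Purely as an illustration, an unfair variant that is able
to fail might look like the following sketch (assuming the lock byte
is claimed with a cmpxchg; the real unfair-lock implementation is
outside the scope of this patch):

static __always_inline int try_set_locked(struct qspinlock *lock)
{
	struct __qspinlock *l = (void *)lock;

	/*
	 * Hypothetical unfair variant, not part of this patch: claim the
	 * lock byte with a cmpxchg so the attempt fails (returns 0) when
	 * another CPU has stolen the lock in the meantime.
	 */
	return cmpxchg(&l->locked, 0, _Q_LOCKED_VAL) == 0;
}

A queue head that sees such a failure simply goes back to waiting for
the locked and pending bits to clear, which is what the new
retry_queue_wait label below is for.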

Signed-off-by: Waiman Long <Waiman.Long@xxxxxx>
---
 kernel/locking/qspinlock.c |   26 ++++++++++++++++++--------
 1 files changed, 18 insertions(+), 8 deletions(-)

diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 2c7abe7..ae1b19d 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -94,7 +94,7 @@ static inline struct mcs_spinlock *decode_tail(u32 tail)
  * can allow better optimization of the lock acquisition for the pending
  * bit holder.
  *
- * This internal structure is also used by the set_locked function which
+ * This internal structure is also used by the try_set_locked function which
  * is not restricted to _Q_PENDING_BITS == 8.
  */
 struct __qspinlock {
@@ -206,19 +206,21 @@ static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
 #endif /* _Q_PENDING_BITS == 8 */
 
 /**
- * set_locked - Set the lock bit and own the lock
- * @lock: Pointer to queue spinlock structure
+ * try_set_locked - Try to set the lock bit and own the lock
+ * @lock : Pointer to queue spinlock structure
+ * Return: 1 if lock acquired, 0 otherwise
  *
  * This routine should only be called when the caller is the only one
  * entitled to acquire the lock.
  */
-static __always_inline void set_locked(struct qspinlock *lock)
+static __always_inline int try_set_locked(struct qspinlock *lock)
 {
        struct __qspinlock *l = (void *)lock;
 
        barrier();
        ACCESS_ONCE(l->locked) = _Q_LOCKED_VAL;
        barrier();
+       return 1;
 }
 
 /**
@@ -357,11 +359,12 @@ queue:
        /*
         * we're at the head of the waitqueue, wait for the owner & pending to
         * go away.
-        * Load-acquired is used here because the set_locked()
+        * Load-acquired is used here because the try_set_locked()
         * function below may not be a full memory barrier.
         *
         * *,x,y -> *,0,0
         */
+retry_queue_wait:
        while ((val = smp_load_acquire(&lock->val.counter))
                                       & _Q_LOCKED_PENDING_MASK)
                arch_mutex_cpu_relax();
@@ -378,13 +381,20 @@ queue:
         */
        for (;;) {
                if (val != tail) {
-                       set_locked(lock);
-                       break;
+                       /*
+                        * The try_set_locked function will only fail if the
+                        * lock has been stolen.
+                        */
+                       if (try_set_locked(lock))
+                               break;
+                       else
+                               goto retry_queue_wait;
                }
                old = atomic_cmpxchg(&lock->val, val, _Q_LOCKED_VAL);
                if (old == val)
                        goto release;   /* No contention */
-
+               else if (old & _Q_LOCKED_MASK)
+                       goto retry_queue_wait;
                val = old;
        }
 
-- 
1.7.1

